diff --git a/CHANGELOG.md b/CHANGELOG.md index 5492a6e8ec..61b89872e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,42 @@ # Changelog +## [1.19.1](https://github.com/googleapis/python-aiplatform/compare/v1.19.0...v1.19.1) (2022-12-08) + + +### Features + +* Add explanationSpec to TrainingPipeline-based custom jobs ([957703f](https://github.com/googleapis/python-aiplatform/commit/957703f9b9c953ee1f67740a652f68279907b104)) + + +### Bug Fixes + +* Add pre-built container(tf2-gpu-2-1) to the container URI list ([cdd557e](https://github.com/googleapis/python-aiplatform/commit/cdd557e5e86b0b4d4cf401509aba5914e0bab8b7)) +* Fix bug that broke profiler with '0-rc2' tensorflow versions. ([8779df5](https://github.com/googleapis/python-aiplatform/commit/8779df5362a6851372cf3cea005a1c6c3096b19e)) +* Fixed argument name in UnmanagedContainerModel ([d876b3a](https://github.com/googleapis/python-aiplatform/commit/d876b3ad8d0129dc98de9f86567d5bf17791058b)) + + +### Documentation + +* Add a sample for create_tensorboard. ([52656ca](https://github.com/googleapis/python-aiplatform/commit/52656cac24eedd500a3d97b3d4678857b1d51ed8)) +* Fix Experiment resource name format docstring. ([f8e5842](https://github.com/googleapis/python-aiplatform/commit/f8e5842a086bcd90c3b153ffa9dc7e788650e670)) +* Fix get Experiment data frame sample ([24e1465](https://github.com/googleapis/python-aiplatform/commit/24e146551237c494349b324ee8830154d129860c)) +* Update docstrings for "data_item_labels" in dataset ([b2f8c42](https://github.com/googleapis/python-aiplatform/commit/b2f8c42d88c29010cf78a9f44fb3cdb711a1e94c)) +* Update README fix product doc link ([43a2679](https://github.com/googleapis/python-aiplatform/commit/43a2679c0d6f5cba7dff4535a03aedd84e09a2f1)) + + +### Miscellaneous Chores + +* Release 1.19.1 ([f01867f](https://github.com/googleapis/python-aiplatform/commit/f01867f697a5d5134c993283f7cf9b22717da029)) + ## [1.19.0](https://github.com/googleapis/python-aiplatform/compare/v1.18.3...v1.19.0) (2022-11-17) ### Features * Add Feature Store: Streaming Ingestion (write_feature_values()) and introduce Preview namespace to Vertex SDK ([bae0315](https://github.com/googleapis/python-aiplatform/commit/bae03158c06865d1b61c06a1c8af64e876ce76dd)) +* Add bq_dataset_id parameter to batch_serve_to_df ([bb72562](https://github.com/googleapis/python-aiplatform/commit/bb72562f4515b6ace73a735477584ca0b5a30f58)) +* Add annotation_labels to ImportDataConfig in aiplatform v1 dataset.proto ([43e2805](https://github.com/googleapis/python-aiplatform/commit/43e28052d798c380de6e102edbe257a0100738cd)) * Add support for ordery_by in Metadata SDK list methods for Artifact, Execution and Context. ([2377606](https://github.com/googleapis/python-aiplatform/commit/23776066909b5b7f77f704722d2719e1a1733ad4)) * Support global network parameter. ([c7f57ad](https://github.com/googleapis/python-aiplatform/commit/c7f57ad505b7251b9c663538e2312998445db691)) diff --git a/docs/README.rst b/docs/README.rst index f1c894550c..1288053eb2 100644 --- a/docs/README.rst +++ b/docs/README.rst @@ -16,7 +16,7 @@ Vertex AI SDK for Python :target: https://pypi.org/project/google-cloud-aiplatform/ .. _Vertex AI: https://cloud.google.com/vertex-ai/docs .. _Client Library Documentation: https://googleapis.dev/python/aiplatform/latest -.. _Product Documentation: https://cloud.google.com/ai-platform/docs +.. 
_Product Documentation: https://cloud.google.com/vertex-ai/docs
Quick Start
diff --git a/google/cloud/aiplatform/_pipeline_based_service/pipeline_based_service.py b/google/cloud/aiplatform/_pipeline_based_service/pipeline_based_service.py
index 98e647d029..2ef1db5ad5 100644
--- a/google/cloud/aiplatform/_pipeline_based_service/pipeline_based_service.py
+++ b/google/cloud/aiplatform/_pipeline_based_service/pipeline_based_service.py
@@ -391,13 +391,21 @@ def list(
for pipeline_execution in filtered_pipeline_executions:
if "pipeline_job_resource_name" in pipeline_execution.metadata:
- service_pipeline_job = cls(
- pipeline_execution.metadata["pipeline_job_resource_name"],
- project=project,
- location=location,
- credentials=credentials,
- )
- service_pipeline_jobs.append(service_pipeline_job)
+ # This is wrapped in a try/except for cases when both
+ # `_component_identifier` and `_template_name_identifier` are
+ # set. In that case, even though all pipelines returned by the
+ # Execution.list() call will match the `_component_identifier`,
+ # some may not match the `_template_name_identifier`.
+ try:
+ service_pipeline_job = cls(
+ pipeline_execution.metadata["pipeline_job_resource_name"],
+ project=project,
+ location=location,
+ credentials=credentials,
+ )
+ service_pipeline_jobs.append(service_pipeline_job)
+ except ValueError:
+ continue
return service_pipeline_jobs
diff --git a/google/cloud/aiplatform/constants/prediction.py b/google/cloud/aiplatform/constants/prediction.py
index 76e4875a20..52a69ab692 100644
--- a/google/cloud/aiplatform/constants/prediction.py
+++ b/google/cloud/aiplatform/constants/prediction.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Google LLC
+# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -79,6 +79,12 @@
]
TF_CONTAINER_URIS = [
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-10:latest",
+ "europe-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-10:latest",
+ "asia-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-10:latest",
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-10:latest",
+ "europe-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-10:latest",
+ "asia-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-10:latest",
"us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-9:latest",
"europe-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-9:latest",
"asia-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-9:latest",
@@ -130,6 +136,9 @@
"us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-1:latest",
"europe-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-1:latest",
"asia-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-1:latest",
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-1:latest",
+ "europe-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-1:latest",
+ "asia-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-1:latest",
"us-docker.pkg.dev/vertex-ai/prediction/tf-cpu.1-15:latest",
"europe-docker.pkg.dev/vertex-ai/prediction/tf-cpu.1-15:latest",
"asia-docker.pkg.dev/vertex-ai/prediction/tf-cpu.1-15:latest",
diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py
index c1f1c9530e..a44d21d784 100644
--- a/google/cloud/aiplatform/datasets/dataset.py
+++ b/google/cloud/aiplatform/datasets/dataset.py
@@ -170,6 +170,11 @@ def create(
labels specified inside index file referenced by ``import_schema_uri``, e.g. jsonl file.
+ This arg is not for specifying the annotation name or the + training target of your data, but for some global labels of + the dataset. E.g., + 'data_item_labels={"aiplatform.googleapis.com/ml_use":"training"}' + specifies that all the uploaded data are used for training. project (str): Project to upload this dataset to. Overrides project set in aiplatform.init. @@ -528,6 +533,11 @@ def import_data( labels specified inside index file referenced by ``import_schema_uri``, e.g. jsonl file. + This arg is not for specifying the annotation name or the + training target of your data, but for some global labels of + the dataset. E.g., + 'data_item_labels={"aiplatform.googleapis.com/ml_use":"training"}' + specifies that all the uploaded data are used for training. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will diff --git a/google/cloud/aiplatform/explain/__init__.py b/google/cloud/aiplatform/explain/__init__.py index 4701d709b5..8167e80a4a 100644 --- a/google/cloud/aiplatform/explain/__init__.py +++ b/google/cloud/aiplatform/explain/__init__.py @@ -35,6 +35,8 @@ ExplanationParameters = explanation_compat.ExplanationParameters FeatureNoiseSigma = explanation_compat.FeatureNoiseSigma +ExplanationSpec = explanation_compat.ExplanationSpec + # Classes used by ExplanationParameters IntegratedGradientsAttribution = explanation_compat.IntegratedGradientsAttribution SampledShapleyAttribution = explanation_compat.SampledShapleyAttribution @@ -44,6 +46,7 @@ __all__ = ( "Encoding", + "ExplanationSpec", "ExplanationMetadata", "ExplanationParameters", "FeatureNoiseSigma", diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index f33ba416ad..09f8260b0c 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -56,6 +56,10 @@ from google.cloud.aiplatform.utils import source_utils from google.cloud.aiplatform.utils import worker_spec_utils +from google.cloud.aiplatform_v1.types import ( + batch_prediction_job as batch_prediction_job_v1, +) +from google.cloud.aiplatform_v1.types import custom_job as custom_job_v1 _LOGGER = base.Logger(__name__) @@ -331,7 +335,7 @@ def __init__( @property def output_info( self, - ) -> Optional[aiplatform.gapic.BatchPredictionJob.OutputInfo]: + ) -> Optional[batch_prediction_job_v1.BatchPredictionJob.OutputInfo]: """Information describing the output of this job, including output location into which prediction output is written. @@ -1121,7 +1125,7 @@ def __init__( self, # TODO(b/223262536): Make display_name parameter fully optional in next major release display_name: str, - worker_pool_specs: Union[List[Dict], List[aiplatform.gapic.WorkerPoolSpec]], + worker_pool_specs: Union[List[Dict], List[custom_job_v1.WorkerPoolSpec]], base_output_dir: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service.proto b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto index 158b0f146a..f37fde1fb6 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/match_service.proto +++ b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package google.cloud.aiplatform.container.v1beta1; +package google.cloud.aiplatform.container.v1; import "google/rpc/status.proto"; @@ -14,6 +14,10 @@ service MatchService { // Returns the nearest neighbors for batch queries. 
If it is a sharded // deployment, calls the other shards and aggregates the responses.
rpc BatchMatch(BatchMatchRequest) returns (BatchMatchResponse) {}
+
+ // Looks up the embeddings.
+ rpc BatchGetEmbeddings(BatchGetEmbeddingsRequest)
+ returns (BatchGetEmbeddingsResponse) {}
}
// Parameters for a match query.
@@ -56,6 +60,28 @@ message MatchRequest {
// not set or set to 0.0, query uses the default value specified in
// NearestNeighborSearchConfig.TreeAHConfig.leaf_nodes_to_search_percent.
int32 leaf_nodes_to_search_percent_override = 7;
+
+ // If set to true, besides the doc id, the query result will also include the
+ // embedding. Setting this value may impact the query performance (e.g.,
+ // increased query latency).
+ bool embedding_enabled = 8;
+}
+
+// Embedding on query result.
+message Embedding {
+ // The id of the matched neighbor.
+ string id = 1;
+
+ // The embedding values.
+ repeated float float_val = 2;
+
+ // The list of restricts.
+ repeated Namespace restricts = 3;
+
+ // The attribute value used for crowding. The maximum number of neighbors
+ // to return per crowding attribute value
+ // (per_crowding_attribute_num_neighbors) is configured per-query.
+ int64 crowding_attribute = 4;
}
// Response of a match query.
@@ -66,9 +92,32 @@ message MatchResponse {
// The distances of the matches.
double distance = 2;
+
+ // If crowding is enabled, the crowding attribute of this neighbor will
+ // be stored here.
+ int64 crowding_attribute = 3;
}
// All its neighbors.
repeated Neighbor neighbor = 1;
+
+ // Embedding values for all returned neighbors.
+ // This is only set when query.embedding_enabled is set to true.
+ repeated Embedding embeddings = 2;
+}
+
+// Request of a Batch Get Embeddings query.
+message BatchGetEmbeddingsRequest {
+ // The ID of the DeployedIndex that will serve the request.
+ string deployed_index_id = 1;
+
+ // The ids to be looked up.
+ repeated string id = 2;
+}
+
+// Response of a Batch Get Embeddings query.
+message BatchGetEmbeddingsResponse {
+ // Embedding values for all ids in the query request.
+ repeated Embedding embeddings = 1;
}
// Parameters for a batch match query.
diff --git a/google/cloud/aiplatform/metadata/experiment_resources.py b/google/cloud/aiplatform/metadata/experiment_resources.py
index 908e30561a..75c7854adc 100644
--- a/google/cloud/aiplatform/metadata/experiment_resources.py
+++ b/google/cloud/aiplatform/metadata/experiment_resources.py
@@ -97,18 +97,20 @@ def __init__(
```
Args:
- experiment_name (str): Required. The name or resource name of this experiment.
+ experiment_name (str):
+ Required. The name or resource name of this experiment.
- Resource name is of the format: projects/123/locations/us-central1/experiments/my-experiment
+ Resource name is of the format:
+ `projects/123/locations/us-central1/metadataStores/default/contexts/my-experiment`
project (str):
- Optional. Project where this experiment is located. Overrides project set in
- aiplatform.init.
+ Optional. Project where this experiment is located. Overrides
+ project set in aiplatform.init.
location (str):
- Optional. Location where this experiment is located. Overrides location set in
- aiplatform.init.
+ Optional. Location where this experiment is located. Overrides
+ location set in aiplatform.init.
credentials (auth_credentials.Credentials):
- Optional. Custom credentials used to retrieve this experiment. Overrides
- credentials set in aiplatform.init.
+ Optional. Custom credentials used to retrieve this experiment.
+ Overrides credentials set in aiplatform.init.
"""
metadata_args = dict(
diff --git a/google/cloud/aiplatform/metadata/metadata.py b/google/cloud/aiplatform/metadata/metadata.py
index d103a79733..92c484f34f 100644
--- a/google/cloud/aiplatform/metadata/metadata.py
+++ b/google/cloud/aiplatform/metadata/metadata.py
@@ -22,7 +22,6 @@
from google.protobuf import timestamp_pb2
from google.cloud.aiplatform import base
-from google.cloud.aiplatform import gapic
from google.cloud.aiplatform import pipeline_jobs
from google.cloud.aiplatform.compat.types import execution as gca_execution
from google.cloud.aiplatform.metadata import constants
@@ -32,6 +31,8 @@
from google.cloud.aiplatform.metadata import experiment_run_resource
from google.cloud.aiplatform.tensorboard import tensorboard_resource
+from google.cloud.aiplatform_v1.types import execution as execution_v1
+
_LOGGER = base.Logger(__name__)
@@ -302,7 +303,9 @@ def start_run(
if tensorboard:
self._experiment_run.assign_backing_tensorboard(tensorboard=tensorboard)
- self._experiment_run.update_state(state=gapic.Execution.State.RUNNING)
+ self._experiment_run.update_state(
+ state=execution_v1.Execution.State.RUNNING
+ )
else:
self._experiment_run = experiment_run_resource.ExperimentRun.create(
@@ -311,7 +314,10 @@ def start_run(
return self._experiment_run
- def end_run(self, state: gapic.Execution.State = gapic.Execution.State.COMPLETE):
+ def end_run(
+ self,
+ state: execution_v1.Execution.State = execution_v1.Execution.State.COMPLETE,
+ ):
"""Ends the current experiment run.
```
diff --git a/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py b/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py
index 4941e42480..264eff9168 100644
--- a/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py
+++ b/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py
@@ -222,7 +222,7 @@ class UnmanagedContainerModel(base_artifact.BaseArtifactSchema):
def __init__(
self,
*,
- predict_schema_ta: utils.PredictSchemata,
+ predict_schemata: utils.PredictSchemata,
container_spec: utils.ContainerSpec,
artifact_id: Optional[str] = None,
uri: Optional[str] = None,
@@ -233,7 +233,7 @@ def __init__(
state: Optional[gca_artifact.Artifact.State] = gca_artifact.Artifact.State.LIVE,
):
"""Args:
- predict_schema_ta (PredictSchemata):
+ predict_schemata (PredictSchemata):
An instance of PredictSchemata which holds instance, parameter and prediction schema uris.
container_spec (ContainerSpec):
An instance of ContainerSpec which holds the container configuration for the model.
@@ -262,7 +262,7 @@ def __init__(
check the validity of state transitions.
""" extended_metadata = copy.deepcopy(metadata) if metadata else {} - extended_metadata["predictSchemata"] = predict_schema_ta.to_dict() + extended_metadata["predictSchemata"] = predict_schemata.to_dict() extended_metadata["containerSpec"] = container_spec.to_dict() super(UnmanagedContainerModel, self).__init__( diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index 222f7df1a8..db0093a04f 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -47,8 +47,8 @@ from google.cloud.aiplatform import models from google.cloud.aiplatform import utils from google.cloud.aiplatform.utils import gcs_utils +from google.cloud.aiplatform.utils import _explanation_utils from google.cloud.aiplatform import model_evaluation - from google.cloud.aiplatform.compat.services import endpoint_service_client from google.cloud.aiplatform.compat.types import ( @@ -66,6 +66,8 @@ prediction as prediction_constants, ) +from google.cloud.aiplatform_v1.types import model as model_v1 + from google.protobuf import field_mask_pb2, timestamp_pb2 from google.protobuf import json_format @@ -617,10 +619,6 @@ def _validate_deploy_args( deployed_model_display_name: Optional[str], traffic_split: Optional[Dict[str, int]], traffic_percentage: Optional[int], - explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None, - explanation_parameters: Optional[ - aiplatform.explain.ExplanationParameters - ] = None, ): """Helper method to validate deploy arguments. @@ -663,20 +661,10 @@ def _validate_deploy_args( not be provided. Traffic of previously deployed models at the endpoint will be scaled down to accommodate new deployed model's traffic. Should not be provided if traffic_split is provided. - explanation_metadata (aiplatform.explain.ExplanationMetadata): - Optional. Metadata describing the Model's input and output for explanation. - `explanation_metadata` is optional while `explanation_parameters` must be - specified when used. - For more details, see `Ref docs ` - explanation_parameters (aiplatform.explain.ExplanationParameters): - Optional. Parameters to configure explaining for Model's predictions. - For more details, see `Ref docs ` Raises: ValueError: if Min or Max replica is negative. Traffic percentage > 100 or < 0. Or if traffic_split does not sum to 100. - ValueError: if explanation_metadata is specified while explanation_parameters - is not. """ if min_replica_count < 0: raise ValueError("Min replica cannot be negative.") @@ -697,11 +685,6 @@ def _validate_deploy_args( "Sum of all traffic within traffic split needs to be 100." ) - if bool(explanation_metadata) and not bool(explanation_parameters): - raise ValueError( - "To get model explanation, `explanation_parameters` must be specified." 
- ) - # Raises ValueError if invalid accelerator if accelerator_type: utils.validate_accelerator_type(accelerator_type) @@ -817,6 +800,9 @@ def deploy( deployed_model_display_name=deployed_model_display_name, traffic_split=traffic_split, traffic_percentage=traffic_percentage, + ) + + explanation_spec = _explanation_utils.create_and_validate_explanation_spec( explanation_metadata=explanation_metadata, explanation_parameters=explanation_parameters, ) @@ -832,8 +818,7 @@ def deploy( accelerator_type=accelerator_type, accelerator_count=accelerator_count, service_account=service_account, - explanation_metadata=explanation_metadata, - explanation_parameters=explanation_parameters, + explanation_spec=explanation_spec, metadata=metadata, sync=sync, deploy_request_timeout=deploy_request_timeout, @@ -854,10 +839,7 @@ def _deploy( accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, service_account: Optional[str] = None, - explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None, - explanation_parameters: Optional[ - aiplatform.explain.ExplanationParameters - ] = None, + explanation_spec: Optional[aiplatform.explain.ExplanationSpec] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), sync=True, deploy_request_timeout: Optional[float] = None, @@ -919,14 +901,8 @@ def _deploy( to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. - explanation_metadata (aiplatform.explain.ExplanationMetadata): - Optional. Metadata describing the Model's input and output for explanation. - `explanation_metadata` is optional while `explanation_parameters` must be - specified when used. - For more details, see `Ref docs ` - explanation_parameters (aiplatform.explain.ExplanationParameters): - Optional. Parameters to configure explaining for Model's predictions. - For more details, see `Ref docs ` + explanation_spec (aiplatform.explain.ExplanationSpec): + Optional. Specification of Model explanation. metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. @@ -963,8 +939,7 @@ def _deploy( accelerator_type=accelerator_type, accelerator_count=accelerator_count, service_account=service_account, - explanation_metadata=explanation_metadata, - explanation_parameters=explanation_parameters, + explanation_spec=explanation_spec, metadata=metadata, deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, @@ -992,10 +967,7 @@ def _deploy_call( accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, service_account: Optional[str] = None, - explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None, - explanation_parameters: Optional[ - aiplatform.explain.ExplanationParameters - ] = None, + explanation_spec: Optional[aiplatform.explain.ExplanationSpec] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, @@ -1066,14 +1038,8 @@ def _deploy_call( to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. - explanation_metadata (aiplatform.explain.ExplanationMetadata): - Optional. Metadata describing the Model's input and output for explanation. - `explanation_metadata` is optional while `explanation_parameters` must be - specified when used. 
- For more details, see `Ref docs ` - explanation_parameters (aiplatform.explain.ExplanationParameters): - Optional. Parameters to configure explaining for Model's predictions. - For more details, see `Ref docs ` + explanation_spec (aiplatform.explain.ExplanationSpec): + Optional. Specification of Model explanation. metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. @@ -1199,13 +1165,7 @@ def _deploy_call( "See https://cloud.google.com/vertex-ai/docs/reference/rpc/google.cloud.aiplatform.v1#google.cloud.aiplatform.v1.Model.FIELDS.repeated.google.cloud.aiplatform.v1.Model.DeploymentResourcesType.google.cloud.aiplatform.v1.Model.supported_deployment_resources_types" ) - # Service will throw error if explanation_parameters is not provided - if explanation_parameters: - explanation_spec = gca_endpoint_compat.explanation.ExplanationSpec() - explanation_spec.parameters = explanation_parameters - if explanation_metadata: - explanation_spec.metadata = explanation_metadata - deployed_model.explanation_spec = explanation_spec + deployed_model.explanation_spec = explanation_spec # Checking if traffic percentage is valid # TODO(b/221059294) PrivateEndpoint should support traffic split @@ -2332,6 +2292,9 @@ def deploy( deployed_model_display_name=deployed_model_display_name, traffic_split=None, traffic_percentage=100, + ) + + explanation_spec = _explanation_utils.create_and_validate_explanation_spec( explanation_metadata=explanation_metadata, explanation_parameters=explanation_parameters, ) @@ -2347,8 +2310,7 @@ def deploy( accelerator_type=accelerator_type, accelerator_count=accelerator_count, service_account=service_account, - explanation_metadata=explanation_metadata, - explanation_parameters=explanation_parameters, + explanation_spec=explanation_spec, metadata=metadata, sync=sync, ) @@ -2463,7 +2425,7 @@ def supported_export_formats( @property def supported_deployment_resources_types( self, - ) -> List[aiplatform.gapic.Model.DeploymentResourcesType]: + ) -> List[model_v1.Model.DeploymentResourcesType]: """List of deployment resource types accepted for this Model. When this Model is deployed, its prediction resources are described by @@ -2519,7 +2481,7 @@ def supported_output_storage_formats(self) -> List[str]: return list(self._gca_resource.supported_output_storage_formats) @property - def predict_schemata(self) -> Optional[aiplatform.gapic.PredictSchemata]: + def predict_schemata(self) -> Optional[model_v1.PredictSchemata]: """The schemata that describe formats of the Model's predictions and explanations, if available.""" self._assert_gca_resource_is_available() @@ -2552,7 +2514,7 @@ def training_job(self) -> Optional["aiplatform.training_jobs._TrainingJob"]: ) @property - def container_spec(self) -> Optional[aiplatform.gapic.ModelContainerSpec]: + def container_spec(self) -> Optional[model_v1.ModelContainerSpec]: """The specification of the container that is to be used when deploying this Model. Not present for AutoML Models.""" self._assert_gca_resource_is_available() @@ -3004,11 +2966,6 @@ def upload( if labels: utils.validate_labels(labels) - if bool(explanation_metadata) and not bool(explanation_parameters): - raise ValueError( - "To get model explanation, `explanation_parameters` must be specified." 
- ) - appended_user_agent = None if local_model: container_spec = local_model.get_serving_container_spec() @@ -3109,13 +3066,12 @@ def upload( if artifact_uri: managed_model.artifact_uri = artifact_uri - # Override explanation_spec if required field is provided - if explanation_parameters: - explanation_spec = gca_endpoint_compat.explanation.ExplanationSpec() - explanation_spec.parameters = explanation_parameters - if explanation_metadata: - explanation_spec.metadata = explanation_metadata - managed_model.explanation_spec = explanation_spec + managed_model.explanation_spec = ( + _explanation_utils.create_and_validate_explanation_spec( + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, + ) + ) request = gca_model_service_compat.UploadModelRequest( parent=initializer.global_config.common_location_path(project, location), @@ -3283,8 +3239,6 @@ def deploy( deployed_model_display_name=deployed_model_display_name, traffic_split=traffic_split, traffic_percentage=traffic_percentage, - explanation_metadata=explanation_metadata, - explanation_parameters=explanation_parameters, ) if isinstance(endpoint, PrivateEndpoint): @@ -3295,6 +3249,11 @@ def deploy( "A maximum of one model can be deployed to each private Endpoint." ) + explanation_spec = _explanation_utils.create_and_validate_explanation_spec( + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, + ) + return self._deploy( endpoint=endpoint, deployed_model_display_name=deployed_model_display_name, @@ -3306,8 +3265,7 @@ def deploy( accelerator_type=accelerator_type, accelerator_count=accelerator_count, service_account=service_account, - explanation_metadata=explanation_metadata, - explanation_parameters=explanation_parameters, + explanation_spec=explanation_spec, metadata=metadata, encryption_spec_key_name=encryption_spec_key_name or initializer.global_config.encryption_spec_key_name, @@ -3331,10 +3289,7 @@ def _deploy( accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, service_account: Optional[str] = None, - explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None, - explanation_parameters: Optional[ - aiplatform.explain.ExplanationParameters - ] = None, + explanation_spec: Optional[aiplatform.explain.ExplanationSpec] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), encryption_spec_key_name: Optional[str] = None, network: Optional[str] = None, @@ -3398,14 +3353,8 @@ def _deploy( to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. - explanation_metadata (aiplatform.explain.ExplanationMetadata): - Optional. Metadata describing the Model's input and output for explanation. - `explanation_metadata` is optional while `explanation_parameters` must be - specified when used. - For more details, see `Ref docs ` - explanation_parameters (aiplatform.explain.ExplanationParameters): - Optional. Parameters to configure explaining for Model's predictions. - For more details, see `Ref docs ` + explanation_spec (aiplatform.explain.ExplanationSpec): + Optional. Specification of Model explanation. metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. 
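The changes above thread explanation configuration through the new `_explanation_utils.create_and_validate_explanation_spec()` helper: `Model.deploy()` and `Model.upload()` still accept `explanation_metadata` and `explanation_parameters`, but now fold them into a single `explanation_spec` before calling `_deploy()` and `_deploy_call()`. A minimal sketch of the unchanged public call pattern follows; the model resource name, machine type, and Shapley path count are placeholder assumptions, not values taken from this change.

from google.cloud import aiplatform

# Hypothetical values; only the deploy() call pattern is illustrated.
explanation_parameters = aiplatform.explain.ExplanationParameters(
    {"sampled_shapley_attribution": {"path_count": 10}}
)

model = aiplatform.Model("projects/my-project/locations/us-central1/models/123")
endpoint = model.deploy(
    machine_type="n1-standard-4",
    # explanation_parameters must be set whenever explanation_metadata is;
    # both are converted into one ExplanationSpec internally.
    explanation_parameters=explanation_parameters,
)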
@@ -3483,8 +3432,7 @@ def _deploy( accelerator_type=accelerator_type, accelerator_count=accelerator_count, service_account=service_account, - explanation_metadata=explanation_metadata, - explanation_parameters=explanation_parameters, + explanation_spec=explanation_spec, metadata=metadata, deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, diff --git a/google/cloud/aiplatform/releases.txt b/google/cloud/aiplatform/releases.txt index c625e5f9fe..861c301c34 100644 --- a/google/cloud/aiplatform/releases.txt +++ b/google/cloud/aiplatform/releases.txt @@ -1,4 +1,4 @@ Use this file when you need to force a patch release with release-please. Edit line 4 below with the version for the release. - \ No newline at end of file +1.19.1 \ No newline at end of file diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 4b394dabdc..e8aa9c0f3d 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -25,6 +25,7 @@ from google.cloud.aiplatform import base from google.cloud.aiplatform.constants import base as constants from google.cloud.aiplatform import datasets +from google.cloud.aiplatform import explain from google.cloud.aiplatform import initializer from google.cloud.aiplatform import models from google.cloud.aiplatform import jobs @@ -32,17 +33,21 @@ from google.cloud.aiplatform import utils from google.cloud.aiplatform.utils import console_utils +from google.cloud.aiplatform.compat.types import env_var as gca_env_var +from google.cloud.aiplatform.compat.types import io as gca_io +from google.cloud.aiplatform.compat.types import model as gca_model from google.cloud.aiplatform.compat.types import ( - env_var as gca_env_var, - io as gca_io, - model as gca_model, pipeline_state as gca_pipeline_state, +) +from google.cloud.aiplatform.compat.types import ( training_pipeline as gca_training_pipeline, ) + from google.cloud.aiplatform.utils import _timestamped_gcs_dir from google.cloud.aiplatform.utils import source_utils from google.cloud.aiplatform.utils import worker_spec_utils from google.cloud.aiplatform.utils import column_transformations_utils +from google.cloud.aiplatform.utils import _explanation_utils from google.cloud.aiplatform.v1.schema.trainingjob import ( definition_v1 as training_job_inputs, @@ -1093,6 +1098,8 @@ def __init__( model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, + explanation_metadata: Optional[explain.ExplanationMetadata] = None, + explanation_parameters: Optional[explain.ExplanationParameters] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, @@ -1194,6 +1201,15 @@ def __init__( and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + explanation_metadata (explain.ExplanationMetadata): + Optional. Metadata describing the Model's input and output for + explanation. `explanation_metadata` is optional while + `explanation_parameters` must be specified when used. + For more details, see `Ref docs ` + explanation_parameters (explain.ExplanationParameters): + Optional. Parameters to configure explaining for Model's + predictions. + For more details, see `Ref docs ` project (str): Project to run training in. 
Overrides project set in aiplatform.init. location (str): @@ -1312,6 +1328,10 @@ def __init__( "set using aiplatform.init(staging_bucket='gs://my-bucket')" ) + # Save explanationSpec as instance attributes + self._explanation_metadata = explanation_metadata + self._explanation_parameters = explanation_parameters + # Backing Custom Job resource is not known until after data preprocessing # once Custom Job is known we log the console uri and the tensorboard uri # this flags keeps that state so we don't log it multiple times @@ -1439,6 +1459,12 @@ def _prepare_and_validate_run( managed_model.labels = model_labels else: managed_model.labels = self._labels + managed_model.explanation_spec = ( + _explanation_utils.create_and_validate_explanation_spec( + explanation_metadata=self._explanation_metadata, + explanation_parameters=self._explanation_parameters, + ) + ) else: managed_model = None @@ -2608,6 +2634,8 @@ def __init__( model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, + explanation_metadata: Optional[explain.ExplanationMetadata] = None, + explanation_parameters: Optional[explain.ExplanationParameters] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, @@ -2745,6 +2773,15 @@ def __init__( and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + explanation_metadata (explain.ExplanationMetadata): + Optional. Metadata describing the Model's input and output for + explanation. `explanation_metadata` is optional while + `explanation_parameters` must be specified when used. + For more details, see `Ref docs ` + explanation_parameters (explain.ExplanationParameters): + Optional. Parameters to configure explaining for Model's + predictions. + For more details, see `Ref docs ` project (str): Project to run training in. Overrides project set in aiplatform.init. location (str): @@ -2813,6 +2850,8 @@ def __init__( model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_description=model_description, + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, staging_bucket=staging_bucket, ) @@ -3529,6 +3568,8 @@ def __init__( model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, + explanation_metadata: Optional[explain.ExplanationMetadata] = None, + explanation_parameters: Optional[explain.ExplanationParameters] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, @@ -3665,6 +3706,15 @@ def __init__( and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + explanation_metadata (explain.ExplanationMetadata): + Optional. Metadata describing the Model's input and output for + explanation. `explanation_metadata` is optional while + `explanation_parameters` must be specified when used. + For more details, see `Ref docs ` + explanation_parameters (explain.ExplanationParameters): + Optional. Parameters to configure explaining for Model's + predictions. 
+ For more details, see `Ref docs ` project (str): Project to run training in. Overrides project set in aiplatform.init. location (str): @@ -3733,6 +3783,8 @@ def __init__( model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_description=model_description, + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, staging_bucket=staging_bucket, ) @@ -5777,6 +5829,8 @@ def __init__( model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, + explanation_metadata: Optional[explain.ExplanationMetadata] = None, + explanation_parameters: Optional[explain.ExplanationParameters] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, @@ -5918,6 +5972,15 @@ def __init__( and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + explanation_metadata (explain.ExplanationMetadata): + Optional. Metadata describing the Model's input and output for + explanation. `explanation_metadata` is optional while + `explanation_parameters` must be specified when used. + For more details, see `Ref docs ` + explanation_parameters (explain.ExplanationParameters): + Optional. Parameters to configure explaining for Model's + predictions. + For more details, see `Ref docs ` project (str): Project to run training in. Overrides project set in aiplatform.init. location (str): @@ -5986,6 +6049,8 @@ def __init__( model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_description=model_description, + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, staging_bucket=staging_bucket, ) diff --git a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py index 81b43145b3..e532db4222 100644 --- a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py +++ b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py @@ -17,11 +17,15 @@ """A plugin to handle remote tensoflow profiler sessions for Vertex AI.""" -from google.cloud.aiplatform.training_utils.cloud_profiler import cloud_profiler_utils +from google.cloud.aiplatform.training_utils.cloud_profiler import ( + cloud_profiler_utils, +) try: import tensorflow as tf - from tensorboard_plugin_profile.profile_plugin import ProfilePlugin + from tensorboard_plugin_profile.profile_plugin import ( + ProfilePlugin, + ) except ImportError as err: raise ImportError(cloud_profiler_utils.import_error_msg) from err @@ -36,10 +40,14 @@ import tensorboard.plugins.base_plugin as tensorboard_base_plugin from werkzeug import Response -from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader +from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import ( + profile_uploader, +) from google.cloud.aiplatform.training_utils import environment_variables from google.cloud.aiplatform.training_utils.cloud_profiler import wsgi_types -from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import base_plugin +from 
google.cloud.aiplatform.training_utils.cloud_profiler.plugins import ( + base_plugin, +) from google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow import ( tensorboard_api, ) @@ -68,8 +76,7 @@ def _get_tf_versioning() -> Optional[Version]: versioning = version.split(".") if len(versioning) != 3: return - - return Version(int(versioning[0]), int(versioning[1]), int(versioning[2])) + return Version(int(versioning[0]), int(versioning[1]), versioning[2]) def _is_compatible_version(version: Version) -> bool: @@ -228,7 +235,7 @@ def warn_tensorboard_env_var(var_name: str): Required. The name of the missing environment variable. """ logging.warning( - f"Environment variable `{var_name}` must be set. " + _BASE_TB_ENV_WARNING + "Environment variable `%s` must be set. %s", var_name, _BASE_TB_ENV_WARNING ) diff --git a/google/cloud/aiplatform/utils/_explanation_utils.py b/google/cloud/aiplatform/utils/_explanation_utils.py new file mode 100644 index 0000000000..6dd87b30e5 --- /dev/null +++ b/google/cloud/aiplatform/utils/_explanation_utils.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Optional + +from google.cloud.aiplatform import explain +from google.cloud.aiplatform.compat.types import ( + endpoint as gca_endpoint_compat, +) + + +def create_and_validate_explanation_spec( + explanation_metadata: Optional[explain.ExplanationMetadata] = None, + explanation_parameters: Optional[explain.ExplanationParameters] = None, +) -> Optional[explain.ExplanationSpec]: + """Validates the parameters needed to create explanation_spec and creates it. + + Args: + explanation_metadata (explain.ExplanationMetadata): + Optional. Metadata describing the Model's input and output for + explanation. `explanation_metadata` is optional while + `explanation_parameters` must be specified when used. + For more details, see `Ref docs ` + explanation_parameters (explain.ExplanationParameters): + Optional. Parameters to configure explaining for Model's + predictions. + For more details, see `Ref docs ` + + Returns: + explanation_spec: Specification of Model explanation. + + Raises: + ValueError: If `explanation_metadata` is given, but + `explanation_parameters` is omitted. `explanation_metadata` is optional + while `explanation_parameters` must be specified when used. + """ + if bool(explanation_metadata) and not bool(explanation_parameters): + raise ValueError( + "To get model explanation, `explanation_parameters` must be specified." 
+ ) + + if explanation_parameters: + explanation_spec = gca_endpoint_compat.explanation.ExplanationSpec() + explanation_spec.parameters = explanation_parameters + if explanation_metadata: + explanation_spec.metadata = explanation_metadata + return explanation_spec + + return None diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 92e84b924b..eb877ff1c3 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.19.0" +__version__ = "1.19.1" diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index b60f8a61af..1bbe46e6a5 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -142,6 +142,9 @@ from .types.featurestore_online_service import ReadFeatureValuesRequest from .types.featurestore_online_service import ReadFeatureValuesResponse from .types.featurestore_online_service import StreamingReadFeatureValuesRequest +from .types.featurestore_online_service import WriteFeatureValuesPayload +from .types.featurestore_online_service import WriteFeatureValuesRequest +from .types.featurestore_online_service import WriteFeatureValuesResponse from .types.featurestore_service import BatchCreateFeaturesOperationMetadata from .types.featurestore_service import BatchCreateFeaturesRequest from .types.featurestore_service import BatchCreateFeaturesResponse @@ -986,6 +989,9 @@ "Value", "VizierServiceClient", "WorkerPoolSpec", + "WriteFeatureValuesPayload", + "WriteFeatureValuesRequest", + "WriteFeatureValuesResponse", "WriteTensorboardExperimentDataRequest", "WriteTensorboardExperimentDataResponse", "WriteTensorboardRunDataRequest", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index 865ac4781a..f59af110fc 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -227,6 +227,11 @@ "methods": [ "streaming_read_feature_values" ] + }, + "WriteFeatureValues": { + "methods": [ + "write_feature_values" + ] } } }, @@ -242,6 +247,11 @@ "methods": [ "streaming_read_feature_values" ] + }, + "WriteFeatureValues": { + "methods": [ + "write_feature_values" + ] } } } diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py index e54231cfd5..86f40ccd64 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -471,6 +471,136 @@ async def sample_streaming_read_feature_values(): # Done; return the response. return response + async def write_feature_values( + self, + request: Union[ + featurestore_online_service.WriteFeatureValuesRequest, dict + ] = None, + *, + entity_type: str = None, + payloads: Sequence[ + featurestore_online_service.WriteFeatureValuesPayload + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.WriteFeatureValuesResponse: + r"""Writes Feature values of one or more entities of an + EntityType. + The Feature values are merged into existing entities if + any. The Feature values to be written must have + timestamp within the online storage retention. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_write_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + payloads = aiplatform_v1.WriteFeatureValuesPayload() + payloads.entity_id = "entity_id_value" + + request = aiplatform_v1.WriteFeatureValuesRequest( + entity_type="entity_type_value", + payloads=payloads, + ) + + # Make the request + response = await client.write_feature_values(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.WriteFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType for the + entities being written. Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + payloads (:class:`Sequence[google.cloud.aiplatform_v1.types.WriteFeatureValuesPayload]`): + Required. The entities to be written. Up to 100,000 + feature values can be written across all ``payloads``. + + This corresponds to the ``payloads`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.WriteFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, payloads]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_online_service.WriteFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + if payloads: + request.payloads.extend(payloads) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.write_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: operations_pb2.ListOperationsRequest = None, diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py index 96ad455e9f..13bbd5cb44 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py @@ -685,6 +685,138 @@ def sample_streaming_read_feature_values(): # Done; return the response. return response + def write_feature_values( + self, + request: Union[ + featurestore_online_service.WriteFeatureValuesRequest, dict + ] = None, + *, + entity_type: str = None, + payloads: Sequence[ + featurestore_online_service.WriteFeatureValuesPayload + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.WriteFeatureValuesResponse: + r"""Writes Feature values of one or more entities of an + EntityType. + The Feature values are merged into existing entities if + any. The Feature values to be written must have + timestamp within the online storage retention. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_write_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + payloads = aiplatform_v1.WriteFeatureValuesPayload() + payloads.entity_id = "entity_id_value" + + request = aiplatform_v1.WriteFeatureValuesRequest( + entity_type="entity_type_value", + payloads=payloads, + ) + + # Make the request + response = client.write_feature_values(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.WriteFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType for the + entities being written. Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ payloads (Sequence[google.cloud.aiplatform_v1.types.WriteFeatureValuesPayload]): + Required. The entities to be written. Up to 100,000 + feature values can be written across all ``payloads``. + + This corresponds to the ``payloads`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.WriteFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, payloads]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.WriteFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, featurestore_online_service.WriteFeatureValuesRequest + ): + request = featurestore_online_service.WriteFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + if payloads is not None: + request.payloads = payloads + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self): return self diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py index 7f95e26363..e6cc120512 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py @@ -141,6 +141,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.write_feature_values: gapic_v1.method.wrap_method( + self.write_feature_values, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -176,6 +181,18 @@ def streaming_read_feature_values( ]: raise NotImplementedError() + @property + def write_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.WriteFeatureValuesRequest], + Union[ + featurestore_online_service.WriteFeatureValuesResponse, + Awaitable[featurestore_online_service.WriteFeatureValuesResponse], + ], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py index 0427689fc1..868e4f6f03 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py @@ -300,6 +300,39 @@ def streaming_read_feature_values( ) return self._stubs["streaming_read_feature_values"] + @property + def write_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.WriteFeatureValuesRequest], + featurestore_online_service.WriteFeatureValuesResponse, + ]: + r"""Return a callable for the write feature values method over gRPC. + + Writes Feature values of one or more entities of an + EntityType. + The Feature values are merged into existing entities if + any. The Feature values to be written must have + timestamp within the online storage retention. + + Returns: + Callable[[~.WriteFeatureValuesRequest], + ~.WriteFeatureValuesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "write_feature_values" not in self._stubs: + self._stubs["write_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/WriteFeatureValues", + request_serializer=featurestore_online_service.WriteFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.WriteFeatureValuesResponse.deserialize, + ) + return self._stubs["write_feature_values"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py index 80c786b63d..2d0d2c0a69 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -303,6 +303,39 @@ def streaming_read_feature_values( ) return self._stubs["streaming_read_feature_values"] + @property + def write_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.WriteFeatureValuesRequest], + Awaitable[featurestore_online_service.WriteFeatureValuesResponse], + ]: + r"""Return a callable for the write feature values method over gRPC. + + Writes Feature values of one or more entities of an + EntityType. + The Feature values are merged into existing entities if + any. The Feature values to be written must have + timestamp within the online storage retention. + + Returns: + Callable[[~.WriteFeatureValuesRequest], + Awaitable[~.WriteFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "write_feature_values" not in self._stubs: + self._stubs["write_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/WriteFeatureValues", + request_serializer=featurestore_online_service.WriteFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.WriteFeatureValuesResponse.deserialize, + ) + return self._stubs["write_feature_values"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py index dfc219be05..252397dfee 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py @@ -330,8 +330,8 @@ async def sample_create_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. @@ -445,9 +445,9 @@ async def sample_get_tensorboard(): Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. 
+ region of a Google Cloud project. If + needed users can also create extra + Tensorboards in their projects. """ # Create or coerce a protobuf request object. @@ -573,8 +573,8 @@ async def sample_update_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py index f9ef55f177..9b31d03ba5 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py @@ -606,8 +606,8 @@ def sample_create_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. @@ -721,9 +721,9 @@ def sample_get_tensorboard(): Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. + region of a Google Cloud project. If + needed users can also create extra + Tensorboards in their projects. """ # Create or coerce a protobuf request object. @@ -849,8 +849,8 @@ def sample_update_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. 
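As a rough usage sketch of the new write_feature_values client method added above (the resource name, entity ID, and feature value below are placeholders, not taken from this change): the method accepts either the flattened entity_type/payloads arguments or a prebuilt request object, but not both.

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.FeaturestoreOnlineServingServiceClient()

    entity_type = (
        "projects/my-project/locations/us-central1/featurestores/"
        "my-featurestore/entityTypes/user"  # placeholder resource name
    )
    payloads = [
        aiplatform_v1.WriteFeatureValuesPayload(
            entity_id="user_123",  # placeholder entity ID
            feature_values={"age": aiplatform_v1.FeatureValue(int64_value=42)},
        )
    ]

    # Flattened form: the client copies these into a WriteFeatureValuesRequest.
    client.write_feature_values(entity_type=entity_type, payloads=payloads)

    # Request-object form: do not also pass the flattened fields, since the
    # guard in the method above raises ValueError when both are supplied.
    request = aiplatform_v1.WriteFeatureValuesRequest(
        entity_type=entity_type, payloads=payloads
    )
    client.write_feature_values(request=request)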
diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index b8dd87c849..b37161ab49 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -160,6 +160,9 @@ ReadFeatureValuesRequest, ReadFeatureValuesResponse, StreamingReadFeatureValuesRequest, + WriteFeatureValuesPayload, + WriteFeatureValuesRequest, + WriteFeatureValuesResponse, ) from .featurestore_service import ( BatchCreateFeaturesOperationMetadata, @@ -695,6 +698,9 @@ "ReadFeatureValuesRequest", "ReadFeatureValuesResponse", "StreamingReadFeatureValuesRequest", + "WriteFeatureValuesPayload", + "WriteFeatureValuesRequest", + "WriteFeatureValuesResponse", "BatchCreateFeaturesOperationMetadata", "BatchCreateFeaturesRequest", "BatchCreateFeaturesResponse", diff --git a/google/cloud/aiplatform_v1/types/annotation_spec.py b/google/cloud/aiplatform_v1/types/annotation_spec.py index 08d5644eaf..0e57f04115 100644 --- a/google/cloud/aiplatform_v1/types/annotation_spec.py +++ b/google/cloud/aiplatform_v1/types/annotation_spec.py @@ -37,7 +37,7 @@ class AnnotationSpec(proto.Message): display_name (str): Required. The user-defined name of the AnnotationSpec. The name can be up to 128 - characters long and can be consist of any UTF-8 + characters long and can consist of any UTF-8 characters. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index a2c6075813..5f3ed9e4e3 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -107,8 +107,8 @@ class BatchPredictionJob(proto.Message): The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if - used, may not have enough permission to access other GCP - resources. + used, may not have enough permission to access other Google + Cloud resources. Users deploying the Model must have the ``iam.serviceAccounts.actAs`` permission on this service @@ -170,8 +170,8 @@ class BatchPredictionJob(proto.Message): Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. - Status details fields contain standard GCP error - details. + Status details fields contain standard Google + Cloud error details. resources_consumed (google.cloud.aiplatform_v1.types.ResourcesConsumed): Output only. Information about resources that had been consumed by this job. Provided in real diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index ea4c102339..7bce04c03f 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -51,7 +51,7 @@ class CustomJob(proto.Message): display_name (str): Required. The display name of the CustomJob. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): Required. Job spec. 
state (google.cloud.aiplatform_v1.types.JobState): diff --git a/google/cloud/aiplatform_v1/types/data_labeling_job.py b/google/cloud/aiplatform_v1/types/data_labeling_job.py index 0799b0e85d..17dee1f811 100644 --- a/google/cloud/aiplatform_v1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1/types/data_labeling_job.py @@ -45,7 +45,7 @@ class DataLabelingJob(proto.Message): display_name (str): Required. The user-defined name of the DataLabelingJob. The name can be up to 128 - characters long and can be consist of any UTF-8 + characters long and can consist of any UTF-8 characters. Display name of a DataLabelingJob. datasets (Sequence[str]): diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index 138f869a06..6f94940695 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -41,9 +41,9 @@ class Dataset(proto.Message): display_name (str): Required. The user-defined name of the Dataset. The name can be up to 128 characters - long and can be consist of any UTF-8 characters. + long and can consist of any UTF-8 characters. description (str): - Optional. The description of the Dataset. + The description of the Dataset. metadata_schema_uri (str): Required. Points to a YAML file stored on Google Cloud Storage describing additional @@ -88,6 +88,11 @@ class Dataset(proto.Message): Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. + metadata_artifact (str): + Output only. The resource name of the Artifact that was + created in MetadataStore when creating the Dataset. The + Artifact resource name pattern is + ``projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}``. """ name = proto.Field( @@ -135,6 +140,10 @@ class Dataset(proto.Message): number=11, message=gca_encryption_spec.EncryptionSpec, ) + metadata_artifact = proto.Field( + proto.STRING, + number=17, + ) class ImportDataConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index 653f25e515..b89edef768 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -44,7 +44,7 @@ class Endpoint(proto.Message): display_name (str): Required. The display name of the Endpoint. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. description (str): The description of the Endpoint. deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModel]): diff --git a/google/cloud/aiplatform_v1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1/types/featurestore_online_service.py index 19b88981f5..667cded8b6 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_online_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_online_service.py @@ -23,6 +23,9 @@ __protobuf__ = proto.module( package="google.cloud.aiplatform.v1", manifest={ + "WriteFeatureValuesRequest", + "WriteFeatureValuesPayload", + "WriteFeatureValuesResponse", "ReadFeatureValuesRequest", "ReadFeatureValuesResponse", "StreamingReadFeatureValuesRequest", @@ -32,6 +35,66 @@ ) +class WriteFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues]. + + Attributes: + entity_type (str): + Required. 
The resource name of the EntityType for the + entities being written. Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be ``user``. + payloads (Sequence[google.cloud.aiplatform_v1.types.WriteFeatureValuesPayload]): + Required. The entities to be written. Up to 100,000 feature + values can be written across all ``payloads``. + """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + payloads = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="WriteFeatureValuesPayload", + ) + + +class WriteFeatureValuesPayload(proto.Message): + r"""Contains Feature values to be written for a specific entity. + + Attributes: + entity_id (str): + Required. The ID of the entity. + feature_values (Mapping[str, google.cloud.aiplatform_v1.types.FeatureValue]): + Required. Feature values to be written, mapping from Feature + ID to value. Up to 100,000 ``feature_values`` entries may be + written across all payloads. The feature generation time, + aligned by days, must be no older than five years (1825 + days) and no later than one year (366 days) in the future. + """ + + entity_id = proto.Field( + proto.STRING, + number=1, + ) + feature_values = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message="FeatureValue", + ) + + +class WriteFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues]. + + """ + + class ReadFeatureValuesRequest(proto.Message): r"""Request message for [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. diff --git a/google/cloud/aiplatform_v1/types/featurestore_service.py b/google/cloud/aiplatform_v1/types/featurestore_service.py index 14640e33fa..4c9341ba36 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -1575,6 +1575,9 @@ class ImportFeatureValuesOperationMetadata(proto.Message): imported_feature_value_count (int): Number of Feature values that have been imported by the operation. + source_uris (Sequence[str]): + The source URI from where Feature values are + imported. invalid_row_count (int): The number of rows in input source that weren't imported due to either @@ -1602,6 +1605,10 @@ class ImportFeatureValuesOperationMetadata(proto.Message): proto.INT64, number=3, ) + source_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) invalid_row_count = proto.Field( proto.INT64, number=6, diff --git a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py index 55aa325755..f4e73d71c0 100644 --- a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py @@ -43,8 +43,8 @@ class HyperparameterTuningJob(proto.Message): display_name (str): Required. The display name of the HyperparameterTuningJob. The name can be up to - 128 characters long and can be consist of any - UTF-8 characters. + 128 characters long and can consist of any UTF-8 + characters. study_spec (google.cloud.aiplatform_v1.types.StudySpec): Required. Study configuration of the HyperparameterTuningJob. 
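The WriteFeatureValuesPayload and WriteFeatureValuesRequest messages defined above carry one payload per entity, with feature_values mapping each feature ID to a typed FeatureValue. A minimal sketch of assembling such a request; the feature IDs and values are invented placeholders:

    from google.cloud import aiplatform_v1

    payload = aiplatform_v1.WriteFeatureValuesPayload(
        entity_id="user_123",  # placeholder entity ID
        feature_values={
            # Each entry pairs a feature ID with one typed FeatureValue.
            "age": aiplatform_v1.FeatureValue(int64_value=42),
            "avg_rating": aiplatform_v1.FeatureValue(double_value=4.5),
            "is_active": aiplatform_v1.FeatureValue(bool_value=True),
        },
    )

    request = aiplatform_v1.WriteFeatureValuesRequest(
        entity_type=(
            "projects/my-project/locations/us-central1/featurestores/"
            "my-featurestore/entityTypes/user"  # placeholder resource name
        ),
        payloads=[payload],
    )

Per the field documentation above, up to 100,000 feature values may be written across all payloads in a single request.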
diff --git a/google/cloud/aiplatform_v1/types/index.py b/google/cloud/aiplatform_v1/types/index.py index 5a2edba05c..b244da1a25 100644 --- a/google/cloud/aiplatform_v1/types/index.py +++ b/google/cloud/aiplatform_v1/types/index.py @@ -41,7 +41,7 @@ class Index(proto.Message): display_name (str): Required. The display name of the Index. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. description (str): The description of the Index. metadata_schema_uri (str): diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py index c70166d522..9cab7dfa08 100644 --- a/google/cloud/aiplatform_v1/types/job_service.py +++ b/google/cloud/aiplatform_v1/types/job_service.py @@ -831,7 +831,7 @@ class StatsAnomaliesObjective(proto.Message): [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] and [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] - are fetched, and page token doesn't take affect in this + are fetched, and page token doesn't take effect in this case. Only used to retrieve attribution score for the top Features which has the highest attribution score in the latest monitoring run. diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index 972f6e6af1..34582ee0cc 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -66,7 +66,7 @@ class Model(proto.Message): display_name (str): Required. The display name of the Model. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. description (str): The description of the Model. version_description (str): diff --git a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py index 53f1dbe735..c99883948f 100644 --- a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py @@ -60,7 +60,7 @@ class ModelDeploymentMonitoringJob(proto.Message): display_name (str): Required. The user-defined name of the ModelDeploymentMonitoringJob. The name can be up - to 128 characters long and can be consist of any + to 128 characters long and can consist of any UTF-8 characters. Display name of a ModelDeploymentMonitoringJob. endpoint (str): diff --git a/google/cloud/aiplatform_v1/types/operation.py b/google/cloud/aiplatform_v1/types/operation.py index c6841e6db9..7f64be5d05 100644 --- a/google/cloud/aiplatform_v1/types/operation.py +++ b/google/cloud/aiplatform_v1/types/operation.py @@ -36,8 +36,8 @@ class GenericOperationMetadata(proto.Message): Output only. Partial failures encountered. E.g. single files that couldn't be read. This field should never exceed 20 entries. - Status details field will contain standard GCP - error details. + Status details field will contain standard + Google Cloud error details. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the operation was created. 
diff --git a/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py index 0b555f6e10..3c08fe19a5 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -49,7 +49,7 @@ class PipelineJob(proto.Message): display_name (str): The display name of the Pipeline. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Pipeline creation time. start_time (google.protobuf.timestamp_pb2.Timestamp): @@ -108,9 +108,9 @@ class PipelineJob(proto.Message): Private services access must already be configured for the network. Pipeline job will apply the network configuration - to the GCP resources being launched, if applied, such as - Vertex AI Training or Dataflow job. If left unspecified, the - workload is not peered with any network. + to the Google Cloud resources being launched, if applied, + such as Vertex AI Training or Dataflow job. If left + unspecified, the workload is not peered with any network. template_uri (str): A template uri from where the [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec], diff --git a/google/cloud/aiplatform_v1/types/specialist_pool.py b/google/cloud/aiplatform_v1/types/specialist_pool.py index 76d49a9334..1ae349fda7 100644 --- a/google/cloud/aiplatform_v1/types/specialist_pool.py +++ b/google/cloud/aiplatform_v1/types/specialist_pool.py @@ -40,7 +40,7 @@ class SpecialistPool(proto.Message): display_name (str): Required. The user-defined name of the SpecialistPool. The name can be up to 128 - characters long and can be consist of any UTF-8 + characters long and can consist of any UTF-8 characters. This field should be unique on project-level. specialist_managers_count (int): diff --git a/google/cloud/aiplatform_v1/types/tensorboard.py b/google/cloud/aiplatform_v1/types/tensorboard.py index c50d38acf5..ff21a9f105 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard.py +++ b/google/cloud/aiplatform_v1/types/tensorboard.py @@ -30,8 +30,8 @@ class Tensorboard(proto.Message): r"""Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users can also create extra - Tensorboards in their projects. + region of a Google Cloud project. If needed users can also + create extra Tensorboards in their projects. 
Attributes: name (str): diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index 58bd8fda5a..70e4590d01 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -668,6 +668,7 @@ async def sample_update_featurestore(): - ``labels`` - ``online_serving_config.fixed_node_count`` - ``online_serving_config.scaling`` + - ``online_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1319,6 +1320,7 @@ async def sample_update_entity_type(): - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` - ``monitoring_config.numerical_threshold_config.value`` - ``monitoring_config.categorical_threshold_config.value`` + - ``offline_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py index 7981fdf8df..836b8578bf 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -934,6 +934,7 @@ def sample_update_featurestore(): - ``labels`` - ``online_serving_config.fixed_node_count`` - ``online_serving_config.scaling`` + - ``online_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1585,6 +1586,7 @@ def sample_update_entity_type(): - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` - ``monitoring_config.numerical_threshold_config.value`` - ``monitoring_config.categorical_threshold_config.value`` + - ``offline_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index c00bb559e9..6461c44027 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -196,40 +196,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> 
Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index d02615ac25..38c1f841cc 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -330,8 +330,8 @@ async def sample_create_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. @@ -445,9 +445,9 @@ async def sample_get_tensorboard(): Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. + region of a Google Cloud project. If + needed users can also create extra + Tensorboards in their projects. """ # Create or coerce a protobuf request object. @@ -573,8 +573,8 @@ async def sample_update_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py index 1afb27d40d..c88e073f93 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -606,8 +606,8 @@ def sample_create_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. @@ -721,9 +721,9 @@ def sample_get_tensorboard(): Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. + region of a Google Cloud project. If + needed users can also create extra + Tensorboards in their projects. """ # Create or coerce a protobuf request object. 
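The online_storage_ttl_days and offline_storage_ttl_days entries added to the update masks above are applied through an ordinary update call. A hedged sketch against the v1beta1 client; the featurestore resource name and the 90-day TTL are placeholder values:

    from google.cloud import aiplatform_v1beta1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    featurestore = aiplatform_v1beta1.Featurestore(
        name=(
            "projects/my-project/locations/us-central1/"
            "featurestores/my-featurestore"  # placeholder resource name
        ),
        online_storage_ttl_days=90,  # placeholder TTL
    )

    # Only fields named in the update mask are changed; everything else on the
    # featurestore is left as-is.
    operation = client.update_featurestore(
        featurestore=featurestore,
        update_mask=field_mask_pb2.FieldMask(paths=["online_storage_ttl_days"]),
    )
    updated_featurestore = operation.result()

The analogous offline_storage_ttl_days field is set per EntityType through update_entity_type in the same way.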
@@ -849,8 +849,8 @@ def sample_update_tensorboard(): The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. + Google Cloud project. If needed users can also create + extra Tensorboards in their projects. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/types/annotation_spec.py b/google/cloud/aiplatform_v1beta1/types/annotation_spec.py index 1f950d298a..08e84ec76c 100644 --- a/google/cloud/aiplatform_v1beta1/types/annotation_spec.py +++ b/google/cloud/aiplatform_v1beta1/types/annotation_spec.py @@ -37,7 +37,7 @@ class AnnotationSpec(proto.Message): display_name (str): Required. The user-defined name of the AnnotationSpec. The name can be up to 128 - characters long and can be consist of any UTF-8 + characters long and can consist of any UTF-8 characters. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index 83d696706a..0d5f1f3110 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -115,8 +115,8 @@ class BatchPredictionJob(proto.Message): The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if - used, may not have enough permission to access other GCP - resources. + used, may not have enough permission to access other Google + Cloud resources. Users deploying the Model must have the ``iam.serviceAccounts.actAs`` permission on this service @@ -179,8 +179,8 @@ class BatchPredictionJob(proto.Message): Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. - Status details fields contain standard GCP error - details. + Status details fields contain standard Google + Cloud error details. resources_consumed (google.cloud.aiplatform_v1beta1.types.ResourcesConsumed): Output only. Information about resources that had been consumed by this job. Provided in real diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 11204de285..0e603c9b57 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -51,7 +51,7 @@ class CustomJob(proto.Message): display_name (str): Required. The display name of the CustomJob. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): Required. Job spec. state (google.cloud.aiplatform_v1beta1.types.JobState): diff --git a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py index fcf727b3fb..99cc4a2076 100644 --- a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py @@ -45,7 +45,7 @@ class DataLabelingJob(proto.Message): display_name (str): Required. The user-defined name of the DataLabelingJob. 
The name can be up to 128 - characters long and can be consist of any UTF-8 + characters long and can consist of any UTF-8 characters. Display name of a DataLabelingJob. datasets (Sequence[str]): diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index af0bb821dd..ac2662b8bf 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -41,9 +41,9 @@ class Dataset(proto.Message): display_name (str): Required. The user-defined name of the Dataset. The name can be up to 128 characters - long and can be consist of any UTF-8 characters. + long and can consist of any UTF-8 characters. description (str): - Optional. The description of the Dataset. + The description of the Dataset. metadata_schema_uri (str): Required. Points to a YAML file stored on Google Cloud Storage describing additional @@ -88,6 +88,11 @@ class Dataset(proto.Message): Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. + metadata_artifact (str): + Output only. The resource name of the Artifact that was + created in MetadataStore when creating the Dataset. The + Artifact resource name pattern is + ``projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}``. """ name = proto.Field( @@ -135,6 +140,10 @@ class Dataset(proto.Message): number=11, message=gca_encryption_spec.EncryptionSpec, ) + metadata_artifact = proto.Field( + proto.STRING, + number=17, + ) class ImportDataConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index 961107ad8b..6481243d91 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -44,7 +44,7 @@ class Endpoint(proto.Message): display_name (str): Required. The display name of the Endpoint. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. description (str): The description of the Endpoint. deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModel]): diff --git a/google/cloud/aiplatform_v1beta1/types/entity_type.py b/google/cloud/aiplatform_v1beta1/types/entity_type.py index 2e29d24000..b6527890bc 100644 --- a/google/cloud/aiplatform_v1beta1/types/entity_type.py +++ b/google/cloud/aiplatform_v1beta1/types/entity_type.py @@ -79,6 +79,14 @@ class EntityType(proto.Message): [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is disabled. + offline_storage_ttl_days (int): + Optional. Config for data retention policy in offline + storage. TTL in days for feature values that will be stored + in offline storage. The Feature Store offline storage + periodically removes obsolete feature values older than + ``offline_storage_ttl_days`` since the feature generation + time. If unset (or explicitly set to 0), default to 4000 + days TTL. 
""" name = proto.Field( @@ -113,6 +121,10 @@ class EntityType(proto.Message): number=8, message=featurestore_monitoring.FeaturestoreMonitoringConfig, ) + offline_storage_ttl_days = proto.Field( + proto.INT32, + number=10, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py index aa7b45e938..f54aa19f0b 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -69,6 +69,15 @@ class Featurestore(proto.Message): serving. state (google.cloud.aiplatform_v1beta1.types.Featurestore.State): Output only. State of the featurestore. + online_storage_ttl_days (int): + Optional. TTL in days for feature values that will be stored + in online serving storage. The Feature Store online storage + periodically removes obsolete feature values older than + ``online_storage_ttl_days`` since the feature generation + time. Note that ``online_storage_ttl_days`` should be less + than or equal to ``offline_storage_ttl_days`` for each + EntityType under a featurestore. If not set, default to 4000 + days encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): Optional. Customer-managed encryption key spec for data storage. If set, both of the @@ -168,6 +177,10 @@ class Scaling(proto.Message): number=8, enum=State, ) + online_storage_ttl_days = proto.Field( + proto.INT32, + number=13, + ) encryption_spec = proto.Field( proto.MESSAGE, number=10, diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index 3212de066b..0edf675493 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -266,6 +266,7 @@ class UpdateFeaturestoreRequest(proto.Message): - ``labels`` - ``online_serving_config.fixed_node_count`` - ``online_serving_config.scaling`` + - ``online_storage_ttl_days`` """ featurestore = proto.Field( @@ -1074,6 +1075,7 @@ class UpdateEntityTypeRequest(proto.Message): - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` - ``monitoring_config.numerical_threshold_config.value`` - ``monitoring_config.categorical_threshold_config.value`` + - ``offline_storage_ttl_days`` """ entity_type = proto.Field( @@ -1582,6 +1584,9 @@ class ImportFeatureValuesOperationMetadata(proto.Message): imported_feature_value_count (int): Number of Feature values that have been imported by the operation. + source_uris (Sequence[str]): + The source URI from where Feature values are + imported. invalid_row_count (int): The number of rows in input source that weren't imported due to either @@ -1609,6 +1614,10 @@ class ImportFeatureValuesOperationMetadata(proto.Message): proto.INT64, number=3, ) + source_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) invalid_row_count = proto.Field( proto.INT64, number=6, diff --git a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py index 11c88fdfa4..f044473d6e 100644 --- a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py @@ -43,8 +43,8 @@ class HyperparameterTuningJob(proto.Message): display_name (str): Required. The display name of the HyperparameterTuningJob. 
The name can be up to - 128 characters long and can be consist of any - UTF-8 characters. + 128 characters long and can consist of any UTF-8 + characters. study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): Required. Study configuration of the HyperparameterTuningJob. diff --git a/google/cloud/aiplatform_v1beta1/types/index.py b/google/cloud/aiplatform_v1beta1/types/index.py index 3154d1cb42..604f73ab1e 100644 --- a/google/cloud/aiplatform_v1beta1/types/index.py +++ b/google/cloud/aiplatform_v1beta1/types/index.py @@ -41,7 +41,7 @@ class Index(proto.Message): display_name (str): Required. The display name of the Index. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. description (str): The description of the Index. metadata_schema_uri (str): diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py index 01651d66ff..55aab64165 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_service.py +++ b/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -833,7 +833,7 @@ class StatsAnomaliesObjective(proto.Message): [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] and [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] - are fetched, and page token doesn't take affect in this + are fetched, and page token doesn't take effect in this case. Only used to retrieve attribution score for the top Features which has the highest attribution score in the latest monitoring run. diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index d25dac0006..b2ef32e5ef 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -66,7 +66,7 @@ class Model(proto.Message): display_name (str): Required. The display name of the Model. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. description (str): The description of the Model. version_description (str): diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index 64d4d2bf7e..8323380378 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -60,7 +60,7 @@ class ModelDeploymentMonitoringJob(proto.Message): display_name (str): Required. The user-defined name of the ModelDeploymentMonitoringJob. The name can be up - to 128 characters long and can be consist of any + to 128 characters long and can consist of any UTF-8 characters. Display name of a ModelDeploymentMonitoringJob. endpoint (str): diff --git a/google/cloud/aiplatform_v1beta1/types/operation.py b/google/cloud/aiplatform_v1beta1/types/operation.py index 2ca36d9504..6f4b42cb83 100644 --- a/google/cloud/aiplatform_v1beta1/types/operation.py +++ b/google/cloud/aiplatform_v1beta1/types/operation.py @@ -36,8 +36,8 @@ class GenericOperationMetadata(proto.Message): Output only. Partial failures encountered. E.g. single files that couldn't be read. This field should never exceed 20 entries. 
- Status details field will contain standard GCP - error details. + Status details field will contain standard + Google Cloud error details. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the operation was created. diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index 400b8b4ab0..568939cb96 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -49,7 +49,7 @@ class PipelineJob(proto.Message): display_name (str): The display name of the Pipeline. The name can be up to 128 characters long and - can be consist of any UTF-8 characters. + can consist of any UTF-8 characters. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Pipeline creation time. start_time (google.protobuf.timestamp_pb2.Timestamp): @@ -108,9 +108,9 @@ class PipelineJob(proto.Message): Private services access must already be configured for the network. Pipeline job will apply the network configuration - to the GCP resources being launched, if applied, such as - Vertex AI Training or Dataflow job. If left unspecified, the - workload is not peered with any network. + to the Google Cloud resources being launched, if applied, + such as Vertex AI Training or Dataflow job. If left + unspecified, the workload is not peered with any network. template_uri (str): A template uri from where the [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec], diff --git a/google/cloud/aiplatform_v1beta1/types/specialist_pool.py b/google/cloud/aiplatform_v1beta1/types/specialist_pool.py index ae31ca518e..00f85435b5 100644 --- a/google/cloud/aiplatform_v1beta1/types/specialist_pool.py +++ b/google/cloud/aiplatform_v1beta1/types/specialist_pool.py @@ -40,7 +40,7 @@ class SpecialistPool(proto.Message): display_name (str): Required. The user-defined name of the SpecialistPool. The name can be up to 128 - characters long and can be consist of any UTF-8 + characters long and can consist of any UTF-8 characters. This field should be unique on project-level. specialist_managers_count (int): diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/google/cloud/aiplatform_v1beta1/types/tensorboard.py index d84252fe4a..66bfaf17a8 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard.py @@ -30,8 +30,8 @@ class Tensorboard(proto.Message): r"""Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users can also create extra - Tensorboards in their projects. + region of a Google Cloud project. If needed users can also + create extra Tensorboards in their projects. Attributes: name (str): diff --git a/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_async.py b/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_async.py new file mode 100644 index 0000000000..8f38e6f7b1 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_write_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + payloads = aiplatform_v1.WriteFeatureValuesPayload() + payloads.entity_id = "entity_id_value" + + request = aiplatform_v1.WriteFeatureValuesRequest( + entity_type="entity_type_value", + payloads=payloads, + ) + + # Make the request + response = await client.write_feature_values(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_sync.py b/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_sync.py new file mode 100644 index 0000000000..21ea23fc34 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. 
+# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_write_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + payloads = aiplatform_v1.WriteFeatureValuesPayload() + payloads.entity_id = "entity_id_value" + + request = aiplatform_v1.WriteFeatureValuesRequest( + entity_type="entity_type_value", + payloads=payloads, + ) + + # Make the request + response = client.write_feature_values(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_sync] diff --git a/samples/generated_samples/snippet_metadata_aiplatform_v1.json b/samples/generated_samples/snippet_metadata_aiplatform_v1.json index 96b1c6b980..6b8e566765 100644 --- a/samples/generated_samples/snippet_metadata_aiplatform_v1.json +++ b/samples/generated_samples/snippet_metadata_aiplatform_v1.json @@ -3318,6 +3318,175 @@ ], "title": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient.write_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "WriteFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "payloads", + "type": "Sequence[google.cloud.aiplatform_v1.types.WriteFeatureValuesPayload]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteFeatureValuesResponse", + "shortName": "write_feature_values" + }, + "description": "Sample for WriteFeatureValues", + "file": "aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient.write_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.WriteFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "WriteFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "payloads", + "type": "Sequence[google.cloud.aiplatform_v1.types.WriteFeatureValuesPayload]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteFeatureValuesResponse", + "shortName": "write_feature_values" + }, + "description": "Sample for WriteFeatureValues", + "file": "aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_write_feature_values_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/samples/model-builder/conftest.py b/samples/model-builder/conftest.py index 8119fdb95f..0c2b076ccb 100644 --- a/samples/model-builder/conftest.py +++ b/samples/model-builder/conftest.py @@ -393,6 +393,19 @@ def mock_create_batch_prediction_job(): yield mock +""" +---------------------------------------------------------------------------- +Tensorboard Fixtures +---------------------------------------------------------------------------- +""" + + +@pytest.fixture +def mock_create_tensorboard(): + with patch.object(aiplatform.tensorboard.Tensorboard, "create") as mock: + yield mock + + """ ---------------------------------------------------------------------------- Endpoint Fixtures diff --git a/samples/model-builder/create_tensorboard_sample.py b/samples/model-builder/create_tensorboard_sample.py new file mode 100644 index 0000000000..e60379aed2 --- /dev/null +++ b/samples/model-builder/create_tensorboard_sample.py @@ -0,0 +1,37 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from google.cloud import aiplatform + + +# [START aiplatform_sdk_create_tensorboard_sample] +def create_tensorboard_sample( + project: str, + display_name: str, + location: str, +): + aiplatform.init(project=project, location=location) + + tensorboard = aiplatform.tensorboard.Tensorboard.create( + display_name=display_name, + project=project, + location=location, + ) + + print(tensorboard.display_name) + print(tensorboard.resource_name) + return tensorboard + + +# [END aiplatform_sdk_create_tensorboard_sample] diff --git a/samples/model-builder/create_tensorboard_sample_test.py b/samples/model-builder/create_tensorboard_sample_test.py new file mode 100644 index 0000000000..64583220f9 --- /dev/null +++ b/samples/model-builder/create_tensorboard_sample_test.py @@ -0,0 +1,36 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import create_tensorboard_sample +import test_constants as constants + + +def test_create_tensorboard_sample(mock_sdk_init, mock_create_tensorboard): + + create_tensorboard_sample.create_tensorboard_sample( + project=constants.PROJECT, + display_name=constants.DISPLAY_NAME, + location=constants.LOCATION, + ) + + mock_sdk_init.assert_called_once_with( + project=constants.PROJECT, location=constants.LOCATION + ) + + mock_create_tensorboard.assert_called_once_with( + display_name=constants.DISPLAY_NAME, + project=constants.PROJECT, + location=constants.LOCATION, + ) diff --git a/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample.py b/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample.py index 0a65a82f7b..5b3706cc7c 100644 --- a/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample.py +++ b/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample.py @@ -18,11 +18,11 @@ # [START aiplatform_sdk_get_experiments_data_frame_sample] def get_experiments_data_frame_sample( - experiment_name: str, + experiment: str, project: str, location: str, ): - aiplatform.init(experiment_name=experiment_name, project=project, location=location) + aiplatform.init(experiment=experiment, project=project, location=location) experiments_df = aiplatform.get_experiment_df() diff --git a/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample_test.py b/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample_test.py index 8740f5c2c7..b042de0bfc 100644 --- a/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample_test.py +++ b/samples/model-builder/experiment_tracking/get_experiment_data_frame_sample_test.py @@ -22,7 +22,7 @@ @pytest.mark.usefixtures("mock_sdk_init") def test_get_experiments_data_frame_sample(mock_get_experiment_df, mock_df): df = get_experiment_data_frame_sample.get_experiments_data_frame_sample( - experiment_name=constants.EXPERIMENT_NAME, + experiment=constants.EXPERIMENT_NAME, project=constants.PROJECT, location=constants.LOCATION, ) diff --git a/setup.py b/setup.py index d589d6dd0d..411e4f07b8 100644 --- 
a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ lit_extra_require = [ "tensorflow >= 2.3.0, <3.0.0dev", "pandas >= 1.0.0", - "lit-nlp >= 0.4.0", + "lit-nlp == 0.4.0", "explainable-ai-sdk >= 1.0.0", ] profiler_extra_require = [ diff --git a/tests/system/aiplatform/test_model_monitoring.py b/tests/system/aiplatform/test_model_monitoring.py index cf347337ff..633ad7f3b6 100644 --- a/tests/system/aiplatform/test_model_monitoring.py +++ b/tests/system/aiplatform/test_model_monitoring.py @@ -30,7 +30,7 @@ ) # constants used for testing -USER_EMAIL = "" +USER_EMAIL = "rosiezou@cloudadvocacyorg.joonix.net" PERMANENT_CHURN_ENDPOINT_ID = "1843089351408353280" CHURN_MODEL_PATH = "gs://mco-mm/churn" DEFAULT_INPUT = { diff --git a/tests/unit/aiplatform/test_cloud_profiler.py b/tests/unit/aiplatform/test_cloud_profiler.py index 388405d034..b686419361 100644 --- a/tests/unit/aiplatform/test_cloud_profiler.py +++ b/tests/unit/aiplatform/test_cloud_profiler.py @@ -31,8 +31,12 @@ from google.api_core import exceptions from google.cloud import aiplatform from google.cloud.aiplatform import training_utils -from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader -from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import base_plugin +from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import ( + profile_uploader, +) +from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import ( + base_plugin, +) from google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow import ( tf_profiler, ) @@ -175,15 +179,21 @@ def tf_import_mock(name, *args, **kwargs): def testCanInitializeTFVersion(self): import tensorflow - with mock.patch.object(tensorflow, "__version__", return_value="1.2.3.4"): + with mock.patch.object(tensorflow, "__version__", "1.2.3.4"): assert not TFProfiler.can_initialize() def testCanInitializeOldTFVersion(self): import tensorflow - with mock.patch.object(tensorflow, "__version__", return_value="2.3.0"): + with mock.patch.object(tensorflow, "__version__", "2.3.0"): assert not TFProfiler.can_initialize() + def testCanInitializeRcTFVersion(self): + import tensorflow as tf + + with mock.patch.object(tf, "__version__", "2.4.0-rc2"): + assert TFProfiler.can_initialize() + def testCanInitializeNoProfilePlugin(self): orig_find_spec = importlib.util.find_spec diff --git a/tests/unit/aiplatform/test_metadata_schema.py b/tests/unit/aiplatform/test_metadata_schema.py index e12af35b23..826a99b942 100644 --- a/tests/unit/aiplatform/test_metadata_schema.py +++ b/tests/unit/aiplatform/test_metadata_schema.py @@ -817,7 +817,7 @@ def test_vertex_endpoint_constructor_parameters_are_set_correctly(self): assert artifact.schema_version == _TEST_SCHEMA_VERSION def test_unmanaged_container_model_title_is_set_correctly(self): - predict_schema_ta = utils.PredictSchemata( + predict_schemata = utils.PredictSchemata( instance_schema_uri="instance_uri", prediction_schema_uri="prediction_uri", parameters_schema_uri="parameters_uri", @@ -827,13 +827,13 @@ def test_unmanaged_container_model_title_is_set_correctly(self): image_uri="gcr.io/test_container_image_uri" ) artifact = google_artifact_schema.UnmanagedContainerModel( - predict_schema_ta=predict_schema_ta, + predict_schemata=predict_schemata, container_spec=container_spec, ) 
assert artifact.schema_title == "google.UnmanagedContainerModel" def test_unmanaged_container_model_constructor_parameters_are_set_correctly(self): - predict_schema_ta = utils.PredictSchemata( + predict_schemata = utils.PredictSchemata( instance_schema_uri="instance_uri", prediction_schema_uri="prediction_uri", parameters_schema_uri="parameters_uri", @@ -844,7 +844,7 @@ def test_unmanaged_container_model_constructor_parameters_are_set_correctly(self ) artifact = google_artifact_schema.UnmanagedContainerModel( - predict_schema_ta=predict_schema_ta, + predict_schemata=predict_schemata, container_spec=container_spec, artifact_id=_TEST_ARTIFACT_ID, uri=_TEST_URI, @@ -1253,7 +1253,7 @@ def teardown_method(self): initializer.global_pool.shutdown(wait=True) def test_predict_schemata_to_dict_method_returns_correct_schema(self): - predict_schema_ta = utils.PredictSchemata( + predict_schemata = utils.PredictSchemata( instance_schema_uri="instance_uri", prediction_schema_uri="prediction_uri", parameters_schema_uri="parameters_uri", @@ -1264,7 +1264,7 @@ def test_predict_schemata_to_dict_method_returns_correct_schema(self): "predictionSchemaUri": "prediction_uri", } - assert json.dumps(predict_schema_ta.to_dict()) == json.dumps(expected_results) + assert json.dumps(predict_schemata.to_dict()) == json.dumps(expected_results) def test_create_uri_from_resource_name_for_valid_resouce_names(self): valid_resouce_names = [ diff --git a/tests/unit/aiplatform/test_pipeline_based_service.py b/tests/unit/aiplatform/test_pipeline_based_service.py index 4a9acd600b..f751671462 100644 --- a/tests/unit/aiplatform/test_pipeline_based_service.py +++ b/tests/unit/aiplatform/test_pipeline_based_service.py @@ -596,15 +596,9 @@ def test_create_and_submit_pipeline_job( == test_backing_pipeline_job.resource_name ) - @pytest.mark.parametrize( - "job_spec_json", - [_TEST_PIPELINE_JOB], - ) def test_list_pipeline_based_service( self, mock_pipeline_based_service_get, - mock_load_yaml_and_json, - job_spec_json, get_execution_mock, list_executions_mock, ): @@ -635,3 +629,25 @@ def test_list_pipeline_based_service( # only 1 of the 2 executions in list_executions_mock matches the # properties of FakePipelineBasedService assert len(test_list_request) == 1 + + def test_list_pipeline_based_service_with_template_name_identifier( + self, + mock_pipeline_based_service_get, + get_execution_mock, + list_executions_mock, + ): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + credentials=_TEST_CREDENTIALS, + ) + + self.FakePipelineBasedService._template_name_identifier = ( + _TEST_INVALID_PIPELINE_NAME_IDENTIFIER + ) + + test_list_request = self.FakePipelineBasedService.list() + + # None of the mock pipelines match the `_template_name_identifier` + # set above, so the returned list should be empty + assert len(test_list_request) == 0 diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py index fdadb2a1b1..326e83983f 100644 --- a/tests/unit/aiplatform/test_training_jobs.py +++ b/tests/unit/aiplatform/test_training_jobs.py @@ -39,6 +39,7 @@ from google.cloud import aiplatform from google.cloud.aiplatform import base from google.cloud.aiplatform import datasets +from google.cloud.aiplatform import explain from google.cloud.aiplatform import initializer from google.cloud.aiplatform import schema from google.cloud.aiplatform import training_jobs @@ -169,6 +170,23 @@ ) _TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials()) + +# Explanation Spec 
+_TEST_EXPLANATION_METADATA = explain.ExplanationMetadata( + inputs={ + "features": { + "input_tensor_name": "dense_input", + "encoding": "BAG_OF_FEATURES", + "modality": "numeric", + "index_feature_mapping": ["abc", "def", "ghj"], + } + }, + outputs={"medv": {"output_tensor_name": "dense_2"}}, +) +_TEST_EXPLANATION_PARAMETERS = explain.ExplanationParameters( + {"sampled_shapley_attribution": {"path_count": 10}} +) + # CMEK encryption _TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_default" _TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec( @@ -923,6 +941,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, model_description=_TEST_MODEL_DESCRIPTION, + explanation_metadata=_TEST_EXPLANATION_METADATA, + explanation_parameters=_TEST_EXPLANATION_PARAMETERS, ) model_from_job = job.run( @@ -1018,6 +1038,10 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, ), + explanation_spec=gca_model.explanation.ExplanationSpec( + metadata=_TEST_EXPLANATION_METADATA, + parameters=_TEST_EXPLANATION_PARAMETERS, + ), encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, version_aliases=["default"], ) @@ -1075,6 +1099,70 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( assert job._has_logged_custom_job + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) + @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) + def test_custom_training_job_run_raises_with_impartial_explanation_spec( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_python_package_to_gcs, + mock_tabular_dataset, + mock_model_service_get, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + credentials=_TEST_CREDENTIALS, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.CustomTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + script_path=_TEST_LOCAL_SCRIPT_FILE_NAME, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, + model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, + model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, + model_description=_TEST_MODEL_DESCRIPTION, + explanation_metadata=_TEST_EXPLANATION_METADATA, + # Missing the required explanations_parameters field + ) + + with pytest.raises(ValueError) as e: + job.run( + dataset=mock_tabular_dataset, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + args=_TEST_RUN_ARGS, + environment_variables=_TEST_ENVIRONMENT_VARIABLES, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + 
model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, + tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + sync=False, + create_request_timeout=None, + ) + + assert e.match( + regexp=r"To get model explanation, `explanation_parameters` " + "must be specified." + ) + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) def test_custom_training_tabular_done( @@ -2925,6 +3013,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, model_description=_TEST_MODEL_DESCRIPTION, + explanation_metadata=_TEST_EXPLANATION_METADATA, + explanation_parameters=_TEST_EXPLANATION_PARAMETERS, ) model_from_job = job.run( @@ -3002,6 +3092,10 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, ), + explanation_spec=gca_model.explanation.ExplanationSpec( + metadata=_TEST_EXPLANATION_METADATA, + parameters=_TEST_EXPLANATION_PARAMETERS, + ), encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, version_aliases=["default"], ) @@ -3060,6 +3154,62 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( assert job._has_logged_custom_job + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) + @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) + def test_custom_container_training_job_run_raises_with_impartial_explanation_spec( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_tabular_dataset, + mock_model_service_get, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.CustomContainerTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + command=_TEST_TRAINING_CONTAINER_CMD, + model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, + model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, + model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, + model_description=_TEST_MODEL_DESCRIPTION, + explanation_metadata=_TEST_EXPLANATION_METADATA, + # Missing the required explanations_parameters field + ) + + with pytest.raises(ValueError) as e: + job.run( + dataset=mock_tabular_dataset, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + args=_TEST_RUN_ARGS, + environment_variables=_TEST_ENVIRONMENT_VARIABLES, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + 
model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, + service_account=_TEST_SERVICE_ACCOUNT, + tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + create_request_timeout=None, + ) + assert e.match( + regexp=r"To get model explanation, `explanation_parameters` " + "must be specified." + ) + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) @pytest.mark.parametrize("sync", [True, False]) @@ -4868,6 +5018,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + explanation_metadata=_TEST_EXPLANATION_METADATA, + explanation_parameters=_TEST_EXPLANATION_PARAMETERS, ) model_from_job = job.run( @@ -4955,6 +5107,10 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, ), encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + explanation_spec=gca_model.explanation.ExplanationSpec( + metadata=_TEST_EXPLANATION_METADATA, + parameters=_TEST_EXPLANATION_PARAMETERS, + ), version_aliases=["default"], ) @@ -5008,6 +5164,64 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) + @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) + def test_custom_python_package_training_job_run_raises_with_impartial_explanation_spec( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_tabular_dataset, + mock_model_service_get, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.CustomContainerTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + command=_TEST_TRAINING_CONTAINER_CMD, + model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, + model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, + model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, + model_description=_TEST_MODEL_DESCRIPTION, + explanation_metadata=_TEST_EXPLANATION_METADATA, + # Missing the required explanations_parameters field + ) + + with pytest.raises(ValueError) as e: + job.run( + dataset=mock_tabular_dataset, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + args=_TEST_RUN_ARGS, + environment_variables=_TEST_ENVIRONMENT_VARIABLES, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + 
training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, + ) + assert e.match( + regexp=r"To get model explanation, `explanation_parameters` " + "must be specified." + ) + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) @pytest.mark.parametrize("sync", [True, False]) diff --git a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py index 92cc816eac..c21e5c735d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -965,6 +965,7 @@ def test_get_dataset(request_type, transport: str = "grpc"): description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) response = client.get_dataset(request) @@ -980,6 +981,7 @@ def test_get_dataset(request_type, transport: str = "grpc"): assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" def test_get_dataset_empty_call(): @@ -1021,6 +1023,7 @@ async def test_get_dataset_async( description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) ) response = await client.get_dataset(request) @@ -1037,6 +1040,7 @@ async def test_get_dataset_async( assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" @pytest.mark.asyncio @@ -1209,6 +1213,7 @@ def test_update_dataset(request_type, transport: str = "grpc"): description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) response = client.update_dataset(request) @@ -1224,6 +1229,7 @@ def test_update_dataset(request_type, transport: str = "grpc"): assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" def test_update_dataset_empty_call(): @@ -1265,6 +1271,7 @@ async def test_update_dataset_async( description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) ) response = await client.update_dataset(request) @@ -1281,6 +1288,7 @@ async def test_update_dataset_async( assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" @pytest.mark.asyncio diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py index 2907c81054..582c8dfba3 100644 --- a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py @@ -48,12 +48,14 @@ ) from google.cloud.aiplatform_v1.types import feature_selector from 
google.cloud.aiplatform_v1.types import featurestore_online_service +from google.cloud.aiplatform_v1.types import types from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 from google.oauth2 import service_account +from google.protobuf import timestamp_pb2 # type: ignore import google.auth @@ -1235,6 +1237,281 @@ async def test_streaming_read_feature_values_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + featurestore_online_service.WriteFeatureValuesRequest, + dict, + ], +) +def test_write_feature_values(request_type, transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.WriteFeatureValuesResponse() + response = client.write_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.WriteFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.WriteFeatureValuesResponse) + + +def test_write_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_feature_values), "__call__" + ) as call: + client.write_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.WriteFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_write_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_online_service.WriteFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.WriteFeatureValuesResponse() + ) + response = await client.write_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.WriteFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.WriteFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_write_feature_values_async_from_dict(): + await test_write_feature_values_async(request_type=dict) + + +def test_write_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.WriteFeatureValuesRequest() + + request.entity_type = "entity_type_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_feature_values), "__call__" + ) as call: + call.return_value = featurestore_online_service.WriteFeatureValuesResponse() + client.write_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "entity_type=entity_type_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_write_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.WriteFeatureValuesRequest() + + request.entity_type = "entity_type_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.WriteFeatureValuesResponse() + ) + await client.write_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "entity_type=entity_type_value", + ) in kw["metadata"] + + +def test_write_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.WriteFeatureValuesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.write_feature_values( + entity_type="entity_type_value", + payloads=[ + featurestore_online_service.WriteFeatureValuesPayload( + entity_id="entity_id_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = "entity_type_value" + assert arg == mock_val + arg = args[0].payloads + mock_val = [ + featurestore_online_service.WriteFeatureValuesPayload( + entity_id="entity_id_value" + ) + ] + assert arg == mock_val + + +def test_write_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.write_feature_values( + featurestore_online_service.WriteFeatureValuesRequest(), + entity_type="entity_type_value", + payloads=[ + featurestore_online_service.WriteFeatureValuesPayload( + entity_id="entity_id_value" + ) + ], + ) + + +@pytest.mark.asyncio +async def test_write_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.WriteFeatureValuesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.WriteFeatureValuesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.write_feature_values( + entity_type="entity_type_value", + payloads=[ + featurestore_online_service.WriteFeatureValuesPayload( + entity_id="entity_id_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = "entity_type_value" + assert arg == mock_val + arg = args[0].payloads + mock_val = [ + featurestore_online_service.WriteFeatureValuesPayload( + entity_id="entity_id_value" + ) + ] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_write_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.write_feature_values( + featurestore_online_service.WriteFeatureValuesRequest(), + entity_type="entity_type_value", + payloads=[ + featurestore_online_service.WriteFeatureValuesPayload( + entity_id="entity_id_value" + ) + ], + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. 
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( @@ -1376,6 +1653,7 @@ def test_featurestore_online_serving_service_base_transport(): methods = ( "read_feature_values", "streaming_read_feature_values", + "write_feature_values", "set_iam_policy", "get_iam_policy", "test_iam_permissions", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py index addc5abbd8..44c9c20ac5 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -967,6 +967,7 @@ def test_get_dataset(request_type, transport: str = "grpc"): description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) response = client.get_dataset(request) @@ -982,6 +983,7 @@ def test_get_dataset(request_type, transport: str = "grpc"): assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" def test_get_dataset_empty_call(): @@ -1023,6 +1025,7 @@ async def test_get_dataset_async( description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) ) response = await client.get_dataset(request) @@ -1039,6 +1042,7 @@ async def test_get_dataset_async( assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" @pytest.mark.asyncio @@ -1211,6 +1215,7 @@ def test_update_dataset(request_type, transport: str = "grpc"): description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) response = client.update_dataset(request) @@ -1226,6 +1231,7 @@ def test_update_dataset(request_type, transport: str = "grpc"): assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" def test_update_dataset_empty_call(): @@ -1267,6 +1273,7 @@ async def test_update_dataset_async( description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", + metadata_artifact="metadata_artifact_value", ) ) response = await client.update_dataset(request) @@ -1283,6 +1290,7 @@ async def test_update_dataset_async( assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" + assert response.metadata_artifact == "metadata_artifact_value" @pytest.mark.asyncio diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index 6eb4d25a5c..0855d80921 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -1017,6 +1017,7 @@ def test_get_featurestore(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", state=featurestore.Featurestore.State.STABLE, + online_storage_ttl_days=2460, ) response = client.get_featurestore(request) @@ -1030,6 +1031,7 @@ def 
test_get_featurestore(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.etag == "etag_value" assert response.state == featurestore.Featurestore.State.STABLE + assert response.online_storage_ttl_days == 2460 def test_get_featurestore_empty_call(): @@ -1070,6 +1072,7 @@ async def test_get_featurestore_async( name="name_value", etag="etag_value", state=featurestore.Featurestore.State.STABLE, + online_storage_ttl_days=2460, ) ) response = await client.get_featurestore(request) @@ -1084,6 +1087,7 @@ async def test_get_featurestore_async( assert response.name == "name_value" assert response.etag == "etag_value" assert response.state == featurestore.Featurestore.State.STABLE + assert response.online_storage_ttl_days == 2460 @pytest.mark.asyncio @@ -2464,6 +2468,7 @@ def test_get_entity_type(request_type, transport: str = "grpc"): name="name_value", description="description_value", etag="etag_value", + offline_storage_ttl_days=2554, ) response = client.get_entity_type(request) @@ -2477,6 +2482,7 @@ def test_get_entity_type(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.offline_storage_ttl_days == 2554 def test_get_entity_type_empty_call(): @@ -2517,6 +2523,7 @@ async def test_get_entity_type_async( name="name_value", description="description_value", etag="etag_value", + offline_storage_ttl_days=2554, ) ) response = await client.get_entity_type(request) @@ -2531,6 +2538,7 @@ async def test_get_entity_type_async( assert response.name == "name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.offline_storage_ttl_days == 2554 @pytest.mark.asyncio @@ -3150,6 +3158,7 @@ def test_update_entity_type(request_type, transport: str = "grpc"): name="name_value", description="description_value", etag="etag_value", + offline_storage_ttl_days=2554, ) response = client.update_entity_type(request) @@ -3163,6 +3172,7 @@ def test_update_entity_type(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.offline_storage_ttl_days == 2554 def test_update_entity_type_empty_call(): @@ -3207,6 +3217,7 @@ async def test_update_entity_type_async( name="name_value", description="description_value", etag="etag_value", + offline_storage_ttl_days=2554, ) ) response = await client.update_entity_type(request) @@ -3221,6 +3232,7 @@ async def test_update_entity_type_async( assert response.name == "name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.offline_storage_ttl_days == 2554 @pytest.mark.asyncio diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 0185bdabbe..7c36d84b95 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -2006,22 +2006,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = 
MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -2031,19 +2028,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected)
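
The new tests in `tests/unit/aiplatform/test_training_jobs.py` above exercise explanation support on TrainingPipeline-based custom jobs: `explanation_metadata` and `explanation_parameters` are passed to the job constructor and surface as an `explanation_spec` on the uploaded model, and supplying only one of the two raises a `ValueError`. The following is a minimal sketch of that usage, mirroring the argument shapes used in those tests; the project, bucket, script path, and container URIs are placeholder assumptions, not values taken from this patch.

```python
# Sketch only: placeholder project, bucket, script, and container values.
from google.cloud import aiplatform
from google.cloud.aiplatform import explain

aiplatform.init(project="my-project", staging_bucket="gs://my-bucket")

# ExplanationMetadata / ExplanationParameters built the same way as the
# _TEST_EXPLANATION_* constants in test_training_jobs.py.
metadata = explain.ExplanationMetadata(
    inputs={
        "features": {
            "input_tensor_name": "dense_input",
            "encoding": "BAG_OF_FEATURES",
            "modality": "numeric",
            "index_feature_mapping": ["abc", "def", "ghj"],
        }
    },
    outputs={"medv": {"output_tensor_name": "dense_2"}},
)
parameters = explain.ExplanationParameters(
    {"sampled_shapley_attribution": {"path_count": 10}}
)

# Both fields are required together; the tests above assert that passing
# explanation_metadata without explanation_parameters raises a ValueError.
job = aiplatform.CustomTrainingJob(
    display_name="my-training-job",                     # placeholder
    script_path="task.py",                              # placeholder
    container_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-9:latest",
    model_serving_container_image_uri=(
        "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-9:latest"
    ),
    explanation_metadata=metadata,
    explanation_parameters=parameters,
)

# The resulting Model carries an explanation_spec built from the two values.
model = job.run(
    replica_count=1,
    model_display_name="my-model",
    sync=True,
)
```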