To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see [Set up authentication for a local development environment](/docs/authentication/set-up-adc-local-dev-environment).
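Once Application Default Credentials are configured (for example with `gcloud auth application-default login`), the streaming client below picks them up automatically, so no credentials are passed to the constructor. As a quick sanity check that ADC resolves, a minimal sketch is shown here (not part of the official sample; it assumes the google-auth package, which the client library depends on, is installed):

import google.auth

# Resolves the Application Default Credentials that the Video Intelligence
# client will use implicitly; raises DefaultCredentialsError if none are found.
credentials, project = google.auth.default()
print("ADC resolved for project:", project)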
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence

# path = 'path_to_file'
# project_id = 'project_id'
# model_id = 'automl_action_recognition_model_id'

client = videointelligence.StreamingVideoIntelligenceServiceClient()

model_path = "projects/{}/locations/us-central1/models/{}".format(
    project_id, model_id
)

automl_config = videointelligence.StreamingAutomlActionRecognitionConfig(
    model_name=model_path
)

video_config = videointelligence.StreamingVideoConfig(
    feature=videointelligence.StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION,
    automl_action_recognition_config=automl_config,
)

# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
    video_config=video_config
)

# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024

def stream_generator():
    yield config_request
    # Load file content.
    # Note: Input videos must have supported video codecs. See
    # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
    # for more details.
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            yield videointelligence.StreamingAnnotateVideoRequest(
                input_content=data
            )

requests = stream_generator()

# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the video.
responses = client.streaming_annotate_video(requests, timeout=900)

# Each response corresponds to about 1 second of video.
for response in responses:
    # Check for errors.
    if response.error.message:
        print(response.error.message)
        break

    for label in response.annotation_results.label_annotations:
        for frame in label.frames:
            print(
                "At {:3d}s segment, {:5.1%} {}".format(
                    frame.time_offset.seconds,
                    frame.confidence,
                    label.entity.entity_id,
                )
            )
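As an optional post-processing step (not part of the official sample), the per-frame results can be reduced to a single best confidence per label. The sketch below is a hypothetical helper that uses only the response fields already read in the loop above:

import collections

def summarize_labels(responses):
    # Track the highest confidence observed for each label entity ID
    # across the whole stream of responses.
    best = collections.defaultdict(float)
    for response in responses:
        if response.error.message:
            raise RuntimeError(response.error.message)
        for label in response.annotation_results.label_annotations:
            for frame in label.frames:
                entity = label.entity.entity_id
                best[entity] = max(best[entity], frame.confidence)
    return dict(best)

Because `streaming_annotate_video` returns a generator that can only be consumed once, this helper would be called in place of the printing loop, for example `print(summarize_labels(responses))`.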
[[["容易理解","easyToUnderstand","thumb-up"],["確實解決了我的問題","solvedMyProblem","thumb-up"],["其他","otherUp","thumb-up"]],[["難以理解","hardToUnderstand","thumb-down"],["資訊或程式碼範例有誤","incorrectInformationOrSampleCode","thumb-down"],["缺少我需要的資訊/範例","missingTheInformationSamplesINeed","thumb-down"],["翻譯問題","translationIssue","thumb-down"],["其他","otherDown","thumb-down"]],["上次更新時間:2025-08-17 (世界標準時間)。"],[],[],null,["# Action recognition\n\n| **Beta**\n|\n|\n| This feature is subject to the \"Pre-GA Offerings Terms\" in the General Service Terms section\n| of the [Service Specific Terms](/terms/service-terms#1).\n|\n| Pre-GA features are available \"as is\" and might have limited support.\n|\n| For more information, see the\n| [launch stage descriptions](/products#product-launch-stages).\n\nAction recognition identifies different actions from video clips, such as\nwalking or dancing. Each of the actions may or may not be performed throughout\nthe entire duration of the video.\n\nUsing an AutoML model\n---------------------\n\n### Before you begin\n\nFor background on creating an AutoML model, check out the Vertex AI\n[Beginner's guide](/vertex-ai/docs/beginner/beginners-guide#video). For\ninstructions on how to create your AutoML model,\nsee [Video data](/vertex-ai/docs/training-overview#video_data) in\n\"Develop and use ML models\" in the Vertex AI documentation.\n\n### Use your AutoML model\n\nThe following code sample demonstrates how to use your\n[AutoML model](/vertex-ai/docs/beginner/beginners-guide#video)\nfor action recognition using the streaming client library. \n\n### Python\n\n\nTo authenticate to Video Intelligence, set up Application Default Credentials.\nFor more information, see\n\n[Set up authentication for a local development environment](/docs/authentication/set-up-adc-local-dev-environment).\n\n import io\n\n from google.cloud import videointelligence_v1p3beta1 as videointelligence\n\n # path = 'path_to_file'\n # project_id = 'project_id'\n # model_id = 'automl_action_recognition_model_id'\n\n client = videointelligence.StreamingVideoIntelligenceServiceClient()\n\n model_path = \"projects/{}/locations/us-central1/models/{}\".format(\n project_id, model_id\n )\n\n automl_config = videointelligence.StreamingAutomlActionRecognitionConfig(\n model_name=model_path\n )\n\n video_config = videointelligence.StreamingVideoConfig(\n feature=videointelligence.StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION,\n automl_action_recognition_config=automl_config,\n )\n\n # config_request should be the first in the stream of requests.\n config_request = videointelligence.StreamingAnnotateVideoRequest(\n video_config=video_config\n )\n\n # Set the chunk size to 5MB (recommended less than 10MB).\n chunk_size = 5 * 1024 * 1024\n\n def stream_generator():\n yield config_request\n # Load file content.\n # Note: Input videos must have supported video codecs. 
See\n # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs\n # for more details.\n with io.open(path, \"rb\") as video_file:\n while True:\n data = video_file.read(chunk_size)\n if not data:\n break\n yield videointelligence.StreamingAnnotateVideoRequest(\n input_content=data\n )\n\n requests = stream_generator()\n\n # streaming_annotate_video returns a generator.\n # The default timeout is about 300 seconds.\n # To process longer videos it should be set to\n # larger than the length (in seconds) of the video.\n responses = client.streaming_annotate_video(requests, timeout=900)\n\n # Each response corresponds to about 1 second of video.\n for response in responses:\n # Check for errors.\n if response.error.message:\n print(response.error.message)\n break\n\n for label in response.annotation_results.label_annotations:\n for frame in label.frames:\n print(\n \"At {:3d}s segment, {:5.1%} {}\".format(\n frame.time_offset.seconds,\n frame.confidence,\n label.entity.entity_id,\n )\n )\n\n\u003cbr /\u003e"]]