
People pathing

Note

End of support notice: On October 31, 2025, AWS will discontinue support for Amazon Rekognition people pathing. After October 31, 2025, you will no longer be able to use the Rekognition people pathing capability. For more information, visit this blog post.

Amazon Rekognition Video can create a track of the path each person takes in a video and provide information such as:

  • The location of the person in the video frame at the time their path is tracked.

  • Facial landmarks, such as the position of the left eye, when detected.
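
For instance, one element of the analysis results can be read as follows. This is a minimal Python sketch, not part of the official examples; it assumes detection is a single PersonDetection dictionary taken from the Persons array that the GetPersonTracking operation (described below) returns. The full response shape is shown at the end of this topic.

    # Read the position and, when present, the left-eye landmark from one
    # PersonDetection element ('detection') of a GetPersonTracking response.
    def describe_person(detection):
        person = detection['Person']
        box = person['BoundingBox']  # coordinates are ratios of the frame size
        print(f"Index {person['Index']} at {detection['Timestamp']} ms: "
              f"left={box['Left']:.2f}, top={box['Top']:.2f}")

        face = person.get('Face')  # not returned at every timestamp
        if face:
            for landmark in face.get('Landmarks', []):
                if landmark['Type'] == 'eyeLeft':
                    print(f"  left eye at x={landmark['X']:.2f}, y={landmark['Y']:.2f}")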

Amazon Rekognition Video people pathing in stored videos is an asynchronous operation. To start the pathing of people in a video, call StartPersonTracking. Amazon Rekognition Video publishes the completion status of the video analysis to an Amazon Simple Notification Service topic. If the video analysis is successful, call GetPersonTracking to get the results of the analysis. For more information about calling Amazon Rekognition Video API operations, see Calling Amazon Rekognition Video operations.
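
As a minimal illustration of this flow, the following Python (Boto3) sketch starts a job and then polls GetPersonTracking until the job finishes. The polling loop is shown only for brevity; the procedure below uses the Amazon SNS completion notification instead, as recommended. The bucket and video names are placeholders.

    import time
    import boto3

    rek = boto3.client('rekognition')

    # Start the asynchronous people-pathing job. NotificationChannel is
    # omitted here for brevity; the examples below supply one.
    job = rek.start_person_tracking(
        Video={'S3Object': {'Bucket': 'amzn-s3-demo-bucket', 'Name': 'video-name'}})

    # Poll for completion. Production code should react to the SNS
    # notification instead of polling, as described above.
    while True:
        result = rek.get_person_tracking(JobId=job['JobId'])
        if result['JobStatus'] != 'IN_PROGRESS':
            break
        time.sleep(5)

    print(result['JobStatus'], '-', len(result.get('Persons', [])), 'detections on the first page')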

The following procedure shows how to track the paths of people through a video stored in an Amazon S3 bucket. The example expands on the code in Analyzing a video stored in an Amazon S3 bucket with Java or Python (SDK), which uses an Amazon Simple Queue Service queue to get the completion status of a video analysis request.

To detect people in a video stored in an Amazon S3 bucket (SDK)

  1. Perform Analyzing a video stored in an Amazon S3 bucket with Java or Python (SDK).

  2. Add the following code to the class VideoDetect that you created in step 1.

    Java
    //Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
    //PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)

    //Persons========================================================================
    private static void StartPersonDetection(String bucket, String video) throws Exception{

        NotificationChannel channel= new NotificationChannel()
                .withSNSTopicArn(snsTopicArn)
                .withRoleArn(roleArn);

        StartPersonTrackingRequest req = new StartPersonTrackingRequest()
                .withVideo(new Video()
                        .withS3Object(new S3Object()
                                .withBucket(bucket)
                                .withName(video)))
                .withNotificationChannel(channel);

        StartPersonTrackingResult startPersonDetectionResult = rek.startPersonTracking(req);
        startJobId=startPersonDetectionResult.getJobId();
    }

    private static void GetPersonDetectionResults() throws Exception{

        int maxResults=10;
        String paginationToken=null;
        GetPersonTrackingResult personTrackingResult=null;

        do{
            if (personTrackingResult !=null){
                paginationToken = personTrackingResult.getNextToken();
            }

            personTrackingResult = rek.getPersonTracking(new GetPersonTrackingRequest()
                    .withJobId(startJobId)
                    .withNextToken(paginationToken)
                    .withSortBy(PersonTrackingSortBy.TIMESTAMP)
                    .withMaxResults(maxResults));

            VideoMetadata videoMetaData=personTrackingResult.getVideoMetadata();

            System.out.println("Format: " + videoMetaData.getFormat());
            System.out.println("Codec: " + videoMetaData.getCodec());
            System.out.println("Duration: " + videoMetaData.getDurationMillis());
            System.out.println("FrameRate: " + videoMetaData.getFrameRate());

            //Show persons, confidence and detection times
            List<PersonDetection> detectedPersons= personTrackingResult.getPersons();

            for (PersonDetection detectedPerson: detectedPersons) {
                long seconds=detectedPerson.getTimestamp()/1000;
                System.out.print("Sec: " + Long.toString(seconds) + " ");
                System.out.println("Person Identifier: " + detectedPerson.getPerson().getIndex());
                System.out.println();
            }
        } while (personTrackingResult !=null && personTrackingResult.getNextToken() != null);
    }

    In the function main, replace the lines:

        StartLabelDetection(amzn-s3-demo-bucket, video);

        if (GetSQSMessageSuccess()==true)
            GetLabelDetectionResults();

    with:

        StartPersonDetection(amzn-s3-demo-bucket, video);

        if (GetSQSMessageSuccess()==true)
            GetPersonDetectionResults();
    Java V2

    This code is taken from the AWS Documentation SDK examples GitHub repository. See the full example here.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.rekognition.RekognitionClient;
    import software.amazon.awssdk.services.rekognition.model.S3Object;
    import software.amazon.awssdk.services.rekognition.model.NotificationChannel;
    import software.amazon.awssdk.services.rekognition.model.StartPersonTrackingRequest;
    import software.amazon.awssdk.services.rekognition.model.Video;
    import software.amazon.awssdk.services.rekognition.model.StartPersonTrackingResponse;
    import software.amazon.awssdk.services.rekognition.model.RekognitionException;
    import software.amazon.awssdk.services.rekognition.model.GetPersonTrackingResponse;
    import software.amazon.awssdk.services.rekognition.model.GetPersonTrackingRequest;
    import software.amazon.awssdk.services.rekognition.model.VideoMetadata;
    import software.amazon.awssdk.services.rekognition.model.PersonDetection;
    import java.util.List;

    /**
     * Before running this Java V2 code example, set up your development
     * environment, including your credentials.
     *
     * For more information, see the following documentation topic:
     *
     * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html
     */
    public class VideoPersonDetection {
        private static String startJobId = "";

        public static void main(String[] args) {
            final String usage = """

                Usage:    <bucket> <video> <topicArn> <roleArn>

                Where:
                   bucket - The name of the bucket in which the video is located (for example, myBucket).\s
                   video - The name of the video (for example, people.mp4).\s
                   topicArn - The ARN of the Amazon Simple Notification Service (Amazon SNS) topic.\s
                   roleArn - The ARN of the AWS Identity and Access Management (IAM) role to use.\s
                """;

            if (args.length != 4) {
                System.out.println(usage);
                System.exit(1);
            }

            String bucket = args[0];
            String video = args[1];
            String topicArn = args[2];
            String roleArn = args[3];
            Region region = Region.US_EAST_1;
            RekognitionClient rekClient = RekognitionClient.builder()
                .region(region)
                .build();

            NotificationChannel channel = NotificationChannel.builder()
                .snsTopicArn(topicArn)
                .roleArn(roleArn)
                .build();

            startPersonLabels(rekClient, channel, bucket, video);
            getPersonDetectionResults(rekClient);
            System.out.println("This example is done!");
            rekClient.close();
        }

        public static void startPersonLabels(RekognitionClient rekClient,
                                             NotificationChannel channel,
                                             String bucket,
                                             String video) {
            try {
                S3Object s3Obj = S3Object.builder()
                    .bucket(bucket)
                    .name(video)
                    .build();

                Video vidOb = Video.builder()
                    .s3Object(s3Obj)
                    .build();

                StartPersonTrackingRequest personTrackingRequest = StartPersonTrackingRequest.builder()
                    .jobTag("DetectingLabels")
                    .video(vidOb)
                    .notificationChannel(channel)
                    .build();

                StartPersonTrackingResponse labelDetectionResponse = rekClient.startPersonTracking(personTrackingRequest);
                startJobId = labelDetectionResponse.jobId();

            } catch (RekognitionException e) {
                System.out.println(e.getMessage());
                System.exit(1);
            }
        }

        public static void getPersonDetectionResults(RekognitionClient rekClient) {
            try {
                String paginationToken = null;
                GetPersonTrackingResponse personTrackingResult = null;
                boolean finished = false;
                String status;
                int yy = 0;

                do {
                    if (personTrackingResult != null)
                        paginationToken = personTrackingResult.nextToken();

                    GetPersonTrackingRequest recognitionRequest = GetPersonTrackingRequest.builder()
                        .jobId(startJobId)
                        .nextToken(paginationToken)
                        .maxResults(10)
                        .build();

                    // Wait until the job succeeds.
                    while (!finished) {
                        personTrackingResult = rekClient.getPersonTracking(recognitionRequest);
                        status = personTrackingResult.jobStatusAsString();

                        if (status.compareTo("SUCCEEDED") == 0)
                            finished = true;
                        else {
                            System.out.println(yy + " status is: " + status);
                            Thread.sleep(1000);
                        }
                        yy++;
                    }

                    finished = false;

                    // Proceed when the job is done - otherwise VideoMetadata is null.
                    VideoMetadata videoMetaData = personTrackingResult.videoMetadata();
                    System.out.println("Format: " + videoMetaData.format());
                    System.out.println("Codec: " + videoMetaData.codec());
                    System.out.println("Duration: " + videoMetaData.durationMillis());
                    System.out.println("FrameRate: " + videoMetaData.frameRate());
                    System.out.println("Job");

                    List<PersonDetection> detectedPersons = personTrackingResult.persons();
                    for (PersonDetection detectedPerson : detectedPersons) {
                        long seconds = detectedPerson.timestamp() / 1000;
                        System.out.print("Sec: " + seconds + " ");
                        System.out.println("Person Identifier: " + detectedPerson.person().index());
                        System.out.println();
                    }

                } while (personTrackingResult != null && personTrackingResult.nextToken() != null);

            } catch (RekognitionException | InterruptedException e) {
                System.out.println(e.getMessage());
                System.exit(1);
            }
        }
    }
    Python
    #Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
    #PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)

    # ============== People pathing ===============
    def StartPersonPathing(self):
        response=self.rek.start_person_tracking(Video={'S3Object': {'Bucket': self.bucket, 'Name': self.video}},
            NotificationChannel={'RoleArn': self.roleArn, 'SNSTopicArn': self.snsTopicArn})

        self.startJobId=response['JobId']
        print('Start Job Id: ' + self.startJobId)

    def GetPersonPathingResults(self):
        maxResults = 10
        paginationToken = ''
        finished = False

        while finished == False:
            response = self.rek.get_person_tracking(JobId=self.startJobId,
                                                    MaxResults=maxResults,
                                                    NextToken=paginationToken)

            print('Codec: ' + response['VideoMetadata']['Codec'])
            print('Duration: ' + str(response['VideoMetadata']['DurationMillis']))
            print('Format: ' + response['VideoMetadata']['Format'])
            print('Frame rate: ' + str(response['VideoMetadata']['FrameRate']))
            print()

            for personDetection in response['Persons']:
                print('Index: ' + str(personDetection['Person']['Index']))
                print('Timestamp: ' + str(personDetection['Timestamp']))
                print()

            if 'NextToken' in response:
                paginationToken = response['NextToken']
            else:
                finished = True

    In the function main, replace the lines:

        analyzer.StartLabelDetection()
        if analyzer.GetSQSMessageSuccess()==True:
            analyzer.GetLabelDetectionResults()

    with:

        analyzer.StartPersonPathing()
        if analyzer.GetSQSMessageSuccess()==True:
            analyzer.GetPersonPathingResults()
    CLI

    Run the following AWS CLI command to start tracking the paths of people in a video.

    aws rekognition start-person-tracking --video '{"S3Object":{"Bucket":"amzn-s3-demo-bucket","Name":"video-name"}}' \
    --notification-channel '{"SNSTopicArn":"topic-ARN","RoleArn":"role-ARN"}' \
    --region region-name --profile profile-name

    Update the following values:

    • Change amzn-s3-demo-bucket and video-name to the Amazon S3 bucket name and file name that you specified in step 2.

    • Change region-name to the AWS Region that you are using.

    • Change profile-name to the name of your developer profile.

    • Change topic-ARN to the ARN of the Amazon SNS topic that you created in step 3 of Configuring Amazon Rekognition Video.

    • Change role-ARN to the ARN of the IAM service role that you created in step 7 of Configuring Amazon Rekognition Video.

    If you are accessing the CLI on a Windows device, use double quotes instead of single quotes and escape the inner double quotes with a backslash (that is, \) to address any parser errors you may encounter. For an example, see the following:

    aws rekognition start-person-tracking --video "{\"S3Object\":{\"Bucket\":\"amzn-s3-demo-bucket\",\"Name\":\"video-name\"}}" \
    --notification-channel "{\"SNSTopicArn\":\"topic-ARN\",\"RoleArn\":\"role-ARN\"}" \
    --region region-name --profile profile-name

    After running the preceding code example, copy the returned jobID and supply it to the following GetPersonTracking command to get the results, replacing job-id-number with the jobID that you received earlier:

    aws rekognition get-person-tracking --job-id job-id-number
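
    If you want only the timestamps and person indices, you can optionally filter the output with the CLI's standard --query option; the expression below is just one illustration:

    aws rekognition get-person-tracking --job-id job-id-number \
    --query 'Persons[*].{Timestamp:Timestamp,Index:Person.Index}'
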
    Note

    If you have already run a video example other than Analyzing a video stored in an Amazon S3 bucket with Java or Python (SDK), the code to replace might be different.

  3. Run the code. The unique identifiers of tracked people are displayed, along with the time, in seconds, at which their paths were tracked.
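
     With the Java example, the output resembles the following (illustrative values only; the timestamps and identifiers depend on your video):

         Format: QuickTime / MOV
         Codec: h264
         Duration: 67301
         FrameRate: 29.97
         Sec: 0 Person Identifier: 0
         Sec: 0 Person Identifier: 1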

GetPersonTracking operation response

GetPersonTracking returns an array, Persons, of PersonDetection objects that contain details about people detected in the video and the times at which their paths are tracked.

You can sort Persons by using the SortBy input parameter. Specify TIMESTAMP to sort the elements by the time at which people's paths are detected in the video. Specify INDEX to sort by the people being tracked in the video. Within each set of results for a person, the elements are sorted in descending order of confidence in the accuracy of the path tracking. By default, Persons is returned sorted by TIMESTAMP. The following example is the JSON response from GetPersonTracking, sorted by the time, in milliseconds since the start of the video, at which people's paths are tracked. (A short sketch showing the SortBy parameter follows the example response.) In the response, note the following:

  • Person information – The PersonDetection array element contains information about the detected person. For example, the time at which the person was detected (Timestamp), the position of the person in the video frame at the time they were detected (BoundingBox), and how confident Amazon Rekognition Video is that the person was correctly detected (Confidence).

    Facial features are not returned at every timestamp for which the person's path is tracked. Furthermore, in some circumstances a tracked person's body might not be visible, in which case only their face location is returned.

  • Pagination information – The example shows one page of person detection information. You can specify how many person elements to return in the MaxResults input parameter for GetPersonTracking. If more results than MaxResults exist, GetPersonTracking returns a token (NextToken) that is used to get the next page of results. For more information, see Getting Amazon Rekognition Video analysis results.

  • Index – A unique identifier that identifies the person throughout the video.

  • Video information – The response includes information about the video format (VideoMetadata) in each page of information returned by GetPersonTracking.

{ "JobStatus": "SUCCEEDED", "NextToken": "AcDymG0fSSoaI6+BBYpka5wVlqttysSPP8VvWcujMDluj1QpFo/vf+mrMoqBGk8eUEiFlllR6g==", "Persons": [ { "Person": { "BoundingBox": { "Height": 0.8787037134170532, "Left": 0.00572916679084301, "Top": 0.12129629403352737, "Width": 0.21666666865348816 }, "Face": { "BoundingBox": { "Height": 0.20000000298023224, "Left": 0.029999999329447746, "Top": 0.2199999988079071, "Width": 0.11249999701976776 }, "Confidence": 99.85971069335938, "Landmarks": [ { "Type": "eyeLeft", "X": 0.06842322647571564, "Y": 0.3010137975215912 }, { "Type": "eyeRight", "X": 0.10543643683195114, "Y": 0.29697132110595703 }, { "Type": "nose", "X": 0.09569807350635529, "Y": 0.33701086044311523 }, { "Type": "mouthLeft", "X": 0.0732642263174057, "Y": 0.3757539987564087 }, { "Type": "mouthRight", "X": 0.10589495301246643, "Y": 0.3722417950630188 } ], "Pose": { "Pitch": -0.5589138865470886, "Roll": -5.1093974113464355, "Yaw": 18.69594955444336 }, "Quality": { "Brightness": 43.052337646484375, "Sharpness": 99.68138885498047 } }, "Index": 0 }, "Timestamp": 0 }, { "Person": { "BoundingBox": { "Height": 0.9074074029922485, "Left": 0.24791666865348816, "Top": 0.09259258955717087, "Width": 0.375 }, "Face": { "BoundingBox": { "Height": 0.23000000417232513, "Left": 0.42500001192092896, "Top": 0.16333332657814026, "Width": 0.12937499582767487 }, "Confidence": 99.97504425048828, "Landmarks": [ { "Type": "eyeLeft", "X": 0.46415066719055176, "Y": 0.2572723925113678 }, { "Type": "eyeRight", "X": 0.5068183541297913, "Y": 0.23705792427062988 }, { "Type": "nose", "X": 0.49765899777412415, "Y": 0.28383663296699524 }, { "Type": "mouthLeft", "X": 0.487221896648407, "Y": 0.3452930748462677 }, { "Type": "mouthRight", "X": 0.5142884850502014, "Y": 0.33167609572410583 } ], "Pose": { "Pitch": 15.966927528381348, "Roll": -15.547388076782227, "Yaw": 11.34195613861084 }, "Quality": { "Brightness": 44.80223083496094, "Sharpness": 99.95819854736328 } }, "Index": 1 }, "Timestamp": 0 }..... ], "VideoMetadata": { "Codec": "h264", "DurationMillis": 67301, "FileExtension": "mp4", "Format": "QuickTime / MOV", "FrameHeight": 1080, "FrameRate": 29.970029830932617, "FrameWidth": 1920 } }