Try the console application code below:
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;
using System;
using System.IO;
using System.Linq;
using System.Threading;

namespace FaceIdentityTest
{
    class Program
    {
        static void Main(string[] args)
        {
            string personPicPath = @"<some path>\personPic.jpg";
            string[] picsPath = { @"<some path>\pic1.jpg", @"<some path>\pic2.jpg" };

            string endpoint = @"https://<your endpoint name>.cognitiveservices.azure.com/";
            string subscriptionKey = "<your subscription key>";

            IFaceClient faceClient = new FaceClient(
                new ApiKeyServiceClientCredentials(subscriptionKey),
                new System.Net.Http.DelegatingHandler[] { });
            faceClient.Endpoint = endpoint;

            // Create an empty PersonGroup
            Console.WriteLine("create person group");
            string personGroupId = "demogroup";
            faceClient.PersonGroup.CreateAsync(personGroupId, "demo group").GetAwaiter().GetResult();

            // Define a person named Bill
            Console.WriteLine("create a person in group");
            var createPersonResult = faceClient.PersonGroupPerson.CreateAsync(
                // Id of the PersonGroup that the person belongs to
                personGroupId,
                // Name of the person
                "Bill"
            ).GetAwaiter().GetResult();

            // Add a face to Bill
            Console.WriteLine("Add a face to person");
            using (Stream s = File.OpenRead(personPicPath))
            {
                // Detect faces in the image and add them to Bill
                faceClient.PersonGroupPerson.AddFaceFromStreamAsync(
                    personGroupId, createPersonResult.PersonId, s).GetAwaiter().GetResult();
            }

            // Train the person group
            Console.WriteLine("start training person group...");
            faceClient.PersonGroup.TrainAsync(personGroupId).GetAwaiter().GetResult();

            // Poll the training status until training finishes
            TrainingStatus trainingStatus = null;
            while (true)
            {
                trainingStatus = faceClient.PersonGroup.GetTrainingStatusAsync(personGroupId).GetAwaiter().GetResult();
                if (trainingStatus.Status != TrainingStatusType.Running)
                {
                    break;
                }
                else
                {
                    Console.WriteLine("training person group...");
                }
                Thread.Sleep(1000);
            }

            foreach (var pic in picsPath)
            {
                Console.WriteLine("start identifying faces in: " + pic);
                using (Stream s = File.OpenRead(pic))
                {
                    // Detect faces in the image; detection returns the face IDs used for identification
                    var faces = faceClient.Face.DetectWithStreamAsync(s).GetAwaiter().GetResult();
                    var faceIds = faces.Select(face => (Guid)face.FaceId).ToList();

                    // Identify the detected faces against the trained PersonGroup
                    var results = faceClient.Face.IdentifyAsync(faceIds, personGroupId).GetAwaiter().GetResult();
                    foreach (var identifyResult in results)
                    {
                        Console.WriteLine("Result of face: {0}", identifyResult.FaceId);
                        if (identifyResult.Candidates.Count == 0)
                        {
                            Console.WriteLine("No one identified");
                        }
                        else
                        {
                            // Take the top candidate among all candidates returned
                            var candidateId = identifyResult.Candidates[0].PersonId;
                            var person = faceClient.PersonGroupPerson.GetAsync(personGroupId, candidateId).GetAwaiter().GetResult();
                            Console.WriteLine("Identified as {0}", person.Name);
                        }
                    }
                }
            }

            Console.ReadKey();
        }
    }
}
My photos:
Result:
By the way, no matter which programming language you use, just follow the same steps as in this demo to identify faces with the Face API.
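For reference, the SDK calls in this demo are wrappers over the Face API REST operations, so the same flow can be reproduced from any language with plain HTTP requests. To the best of my knowledge, the v1.0 endpoints involved are roughly:

PUT  /face/v1.0/persongroups/{personGroupId}                                    (create the PersonGroup)
POST /face/v1.0/persongroups/{personGroupId}/persons                            (create a person)
POST /face/v1.0/persongroups/{personGroupId}/persons/{personId}/persistedFaces  (add a face to the person)
POST /face/v1.0/persongroups/{personGroupId}/train                              (start training)
GET  /face/v1.0/persongroups/{personGroupId}/training                           (poll training status)
POST /face/v1.0/detect                                                          (detect faces and get faceIds)
POST /face/v1.0/identify                                                        (identify the faceIds against the group)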
Hope this helps.
You can import Microsoft.Azure.CognitiveServices.Vision.Face in VS here:
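If you prefer the command line to the Manage NuGet Packages dialog, installing the package like this should also work (assuming the NuGet package id matches the namespace used in the code above):

Install-Package Microsoft.Azure.CognitiveServices.Vision.Face

or, with the .NET CLI:

dotnet add package Microsoft.Azure.CognitiveServices.Vision.Face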