2017-12-01 17:00:49 zhenghongzhi6 阅读数 7827

本文首发于“洪流学堂”公众号。
洪流学堂,让你快人几步

源码地址

https://github.com/zhenghongzhi/WitBaiduAip

功能概述

1 语音识别

  1. 从麦克风录制音频
  2. AudioClip的音频数据转换为百度语音识别的PCM16格式
  3. 百度语音识别Restful接口的封装以及一个测试场景

2 语音合成

  1. 百度语音合成Restful接口的封装以及一个测试场景
  2. mp3格式运行时转为AudioClip进行播放

为什么不使用百度的C# SDK
百度的C# SDK使用了一些Unity不支持的特性,直接导入unity不能用
而且百度C# SDK只是封装了Restful的接口,功能上并没有增多
自己编写更简洁

更新说明

2018-08-22更新

根据百度API的更新,语音合成性能优化,直接使用原生格式,移除第三方插件

2018-03-28更新

加入平台判断,更好地支持Android和iOS

2018-01-11更新

在工程中加入了语音合成

2018-01-02更新

应广大小伙伴的要求,对工程进行了重构,放出github源码
https://github.com/zhenghongzhi/WitBaiduAip

2017-12-23更新

教程首发


洪流学堂,让你快人几步
欢迎关注“洪流学堂”微信公众号

2018-03-19 23:27:54 fengmao31 阅读数 708
using NAudio.Wave;
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Text;
using UnityEngine;
using Xfrog.Net;
/// <summary>
/// JSON payload returned by the Baidu speech-recognition (ASR) REST API.
/// Field names deliberately mirror the wire format so JsonUtility can map them directly.
/// </summary>
public class AsrResponse
{
    public int err_no;      // 0 on success, otherwise a Baidu error code
    public string err_msg;  // human-readable error description
    public string sn;       // serial number of the request
    public string[] result; // candidate transcriptions, best match first

    /// <summary>Deserializes a raw JSON string into an <see cref="AsrResponse"/>.</summary>
    public static AsrResponse CreateFromJSON(string jsonString) =>
        JsonUtility.FromJson<AsrResponse>(jsonString);
}
// NOTE(review): Demo MonoBehaviour wiring a full voice-chat loop:
//   mic recording -> Baidu ASR (REST) -> Tuling chatbot -> Baidu TTS -> AudioSource playback.
// All HTTP calls use synchronous HttpWebRequest on the Unity main thread, so
// StopRecordAudio() blocks the frame until every request finishes.
// WARNING: access tokens and API keys are hard-coded below and expire — move them to configuration.
public class record : MonoBehaviour
{
    AudioClip audioClip;     // most recent microphone recording
    AudioSource audioSource; // playback source located at "Canvas/Audio Source"

    // Maximum recording length (seconds) passed to Microphone.Start.
    public int recordTime=5;
    //// Use this for initialization
    void Start()
    {   audioSource = GameObject.Find("Canvas/Audio Source").GetComponent<AudioSource>();
        string[] md = Microphone.devices;
        int mdl = md.Length;
        if (mdl == 0)
        {
            Debug.Log("no microphone found");
        }
    }

    //// Update is called once per frame
    //void Update () {

    //}
    // Starts a fresh recording from the default microphone at 16 kHz
    // (the sample rate the Baidu ASR request below advertises).
    public void StartRecordAudio()
    {
   
        Microphone.End(null);
        audioClip = Microphone.Start(null, false, recordTime, 16000);
        Debug.Log("开始录音.....");
        // if(Microphone.GetPosition())
        if (!Microphone.IsRecording(null))
        {
            Debug.Log("没有声音.....");
            return;
        }
        Microphone.GetPosition(null);

    }

    // Stops recording, then synchronously runs the whole ASR -> chatbot -> TTS -> playback
    // pipeline. NOTE(review): the WebResponse/StreamReader objects created below are never
    // disposed; wrap them in using blocks if this demo is ever productionized.
    public void StopRecordAudio()
    {
        /*** Read a file into a byte array (unused example kept for reference) ***
        FileInfo fi = new FileInfo("d:\\1.wav");
        FileStream fs = new FileStream("d:\\1.wav", FileMode.Open);
        byte[] buffer = new byte[fs.Length];
        fs.Read(buffer, 0, buffer.Length);
        fs.Close();
        ***/
        Microphone.End(null);



        //************* Call the Baidu speech-recognition (ASR) REST API
        byte[] buffer = ConvertAudioClipToPCM16(audioClip);
       //byte[] buffer = GetClipData();    
        HttpWebRequest request = null;
     //request = (HttpWebRequest)HttpWebRequest.Create("https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?language=ZH-CN&format=detailed");
      // NOTE(review): the token query parameter is a temporary OAuth access token and expires.
      request = (HttpWebRequest)HttpWebRequest.Create("http://vop.baidu.com/server_api?lan=zh&cuid=B8-81-98-41-3E-E9&token=24.91d00cdafeef1490ec706f7e2f2659e1.2592000.1524029061.282335-10681472");
       request.SendChunked = true;
        request.Accept = @"application/json;text/xml";
        request.Method = "POST";
       request.ProtocolVersion = HttpVersion.Version11;
       // request.ContentType = @"audio/wav; codec=audio/pcm; samplerate=16000";
        // NOTE(review): Baidu documents the content type as "audio/pcm;rate=16000";
        // confirm the spaces around '=' here are actually tolerated by the endpoint.
        request.ContentType = @"audio/pcm; rate = 16000";
       // Leftover header from the Bing Speech experiment above; unused by Baidu — removable.
       request.Headers["Ocp-Apim-Subscription-Key"] = "e8cd273d62c347cb9f64d6b94b94435d";
        request.ContentLength = buffer.Length;
        // Send an audio file by 1024 byte chunks
        /*
        * Open a request stream and write 1024 byte chunks in the stream one at a time.
        */
        using (Stream requestStream = request.GetRequestStream())
        {
            requestStream.Write(buffer, 0, buffer.Length);
        }
        Debug.Log("Response:");
        string responseString;
        WebResponse response = request.GetResponse();
        Debug.Log(((HttpWebResponse)response).StatusCode);
        StreamReader sr = new StreamReader(response.GetResponseStream());
        responseString = sr.ReadToEnd();
        // NOTE(review): if recognition failed, `result` may be null/empty and this throws —
        // err_no should be checked before indexing.
        responseString = AsrResponse.CreateFromJSON(responseString).result[0];
        Debug.Log(responseString);



        //************ Query the Tuling chatbot API for a reply to the recognized text
        string url = "http://www.tuling123.com/openapi/api";
        string key = "7c664d28fa0b472ab9833c2679c431f5";
        string postDataStr = "key=" + key + "&info=" + responseString;
        string result = HttpGet(url, postDataStr);
        JsonObject newObj = new JsonObject(result);
        string info = newObj["text"].Value.ToString();
        Debug.Log(info);

        ////************** TTS via WWW streaming (disabled): per the original author, MP3 decode
        //// through WWW is not supported on Windows/Linux, only on mobile (reportedly an MP3
        //// licensing issue), so streaming playback from web/file fails on desktop; in-scene
        //// clips appear to have been re-encoded.
        //string url_speaker = "http://tsn.baidu.com/text2audio";
        //string postDataStr_speaker = "tex=" + info + "&lan=zh&cuid=B8-81-98-41-3E-E9&ctp=1&tok=24.d1ba8c1f1efa8a3de68678e5404d55a4.2592000.1523629153.282335-10681472&ctp=1&cuid=10681472";
        //string req = url_speaker + "?" + postDataStr_speaker;
        //WWW www = new WWW(req);  // start a download of the given URL
        //audioSource.clip = www.GetAudioClip(true, false, AudioType.MPEG); // 2D, streaming
        //audioSource.Play();



        //************** Synthesize speech (Baidu TTS) and play back the MP3 reply
        string url2 = "http://tsn.baidu.com/text2audio";
        byte[] buffer2 = null;
        string postDataStr2 = "tex=" + info + "&lan=zh&cuid=B8-81-98-41-3E-E9&ctp=1&tok=24.d1ba8c1f1efa8a3de68678e5404d55a4.2592000.1523629153.282335-10681472";

        HttpWebRequest request2 = (HttpWebRequest)WebRequest.Create(url2 + "?" + postDataStr2);
        request2.Method = "GET";
        request2.ContentType = "text/html;charset=UTF-8";
        HttpWebResponse response2 = (HttpWebResponse)request2.GetResponse();

        using (Stream stream = response2.GetResponseStream())
        {
            //buffer2 = new byte[stream.Length];  //////////////// fails: response streams do not support Length; must buffer incrementally instead

            // long length = request2.ContentLength;
            //buffer2 = new byte[length];/// numeric overflow
            // stream.Read(buffer2, 0, (int)length);
            //BinaryReader br = new BinaryReader(stream);
            //    buffer2 = br.ReadBytes((int)stream.Length);
         
                // Read the response in chunks until EOF (Read returns 0).
                // NOTE(review): 16*1096 looks like a typo for 16*1024; harmless either way.
                byte[] buffer3 = new byte[16*1096];
                using (MemoryStream memoryStream = new MemoryStream())
                {
                    int count = 0;
                    do
                    {
                        count = stream.Read(buffer3, 0, buffer3.Length);
                        memoryStream.Write(buffer3, 0, count);

                    } while (count != 0);

                    buffer2 = memoryStream.ToArray();

                }
          


        }
        // NOTE(review): on TTS failure Baidu returns a JSON body instead of MP3, in which
        // case FromMp3Data will throw — consider checking response2.ContentType first.
        audioSource.clip = FromMp3Data(buffer2);
        audioSource.Play();
    }
    /// <summary>
    /// Converts an MP3-format byte array into an AudioClip (via NAudio decoding).
    /// </summary>
    /// <param name="data">Raw MP3 file bytes.</param>
    /// <returns>A single-channel AudioClip built from the decoded PCM data.</returns>

    public static AudioClip FromMp3Data(byte[] data)
    {
        // Load the data into a stream  
        MemoryStream mp3stream = new MemoryStream(data);
        // Convert the data in the stream to WAV format  
        Mp3FileReader mp3audio = new Mp3FileReader(mp3stream);

        WaveStream waveStream = WaveFormatConversionStream.CreatePcmStream(mp3audio);
        // Convert to WAV data  
        Wav wav = new Wav(AudioMemStream(waveStream).ToArray());

        AudioClip audioClip = AudioClip.Create("testSound", wav.SampleCount, 1, wav.Frequency, false);
        audioClip.SetData(wav.LeftChannel, 0);
        // Return the clip  
        return audioClip;
    }

    // Writes the PCM wave stream into an in-memory WAV container and returns that stream.
    // NOTE(review): WaveFileWriter disposes the underlying MemoryStream when the using block
    // exits; ToArray() still works on a disposed MemoryStream, so the caller's usage is safe.
    private static MemoryStream AudioMemStream(WaveStream waveStream)
    {
        MemoryStream outputStream = new MemoryStream();
        using (WaveFileWriter waveFileWriter = new WaveFileWriter(outputStream, waveStream.WaveFormat))
        {
            byte[] bytes = new byte[waveStream.Length];
            waveStream.Position = 0;
            waveStream.Read(bytes, 0, Convert.ToInt32(waveStream.Length));
            waveFileWriter.Write(bytes, 0, bytes.Length);
            waveFileWriter.Flush();
        }
        return outputStream;
    }
    //********** Convert an AudioClip's float samples (all channels, interleaved) into
    // PCM16 bytes (2 bytes per sample, native endianness) by scaling with short.MaxValue.
    public static byte[] ConvertAudioClipToPCM16(AudioClip clip)
    {
        var samples = new float[clip.samples * clip.channels];
        clip.GetData(samples, 0);
        var samples_int16 = new short[samples.Length];

        for (var index = 0; index < samples.Length; index++)
        {
            var f = samples[index];
            samples_int16[index] = (short)(f * short.MaxValue);
        }

        var byteArray = new byte[samples_int16.Length * 2];
        Buffer.BlockCopy(samples_int16, 0, byteArray, 0, byteArray.Length);

        return byteArray;

    }
    ////************ Convert MP3 to AudioClip (older duplicate of FromMp3Data, kept commented out)
    //public static AudioClip FromMp3Data(byte[] data)
    //{
    //    // Load the data into a stream  
    //    MemoryStream mp3stream = new MemoryStream(data);
    //    // Convert the data in the stream to WAV format  
    //    Mp3FileReader mp3audio = new Mp3FileReader(mp3stream);

    //    WaveStream waveStream = WaveFormatConversionStream.CreatePcmStream(mp3audio);
    //    // Convert to WAV data  
    //    Wav wav = new Wav(AudioMemStream(waveStream).ToArray());

    //    AudioClip audioClip = AudioClip.Create("testSound", wav.SampleCount, 1, wav.Frequency, false);
    //    audioClip.SetData(wav.LeftChannel, 0);
    //    // Return the clip  
    //    return audioClip;
    //}

    // Simple blocking HTTP GET helper; appends `postDataStr` as the query string
    // and returns the response body decoded as UTF-8.
    public static string HttpGet(string Url, string postDataStr)
    {
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(Url + (postDataStr == "" ? "" : "?") + postDataStr);
        request.Method = "GET";
        request.ContentType = "text/html;charset=UTF-8";
        HttpWebResponse response = (HttpWebResponse)request.GetResponse();
        Stream myResponseStream = response.GetResponseStream();
        StreamReader myStreamReader = new StreamReader(myResponseStream, Encoding.UTF8);
        string retString = myStreamReader.ReadToEnd();
        myStreamReader.Close();
        myResponseStream.Close();
        return retString;


    }
    // Stops any in-progress recording and plays back the captured clip.
    public void PlayRecordAudio()
    {
        Microphone.End(null);
        audioSource.clip = audioClip;
        audioSource.Play();
    }
    // Stops any in-progress recording and halts playback.
    public void EndPlayRecordAudio()
    {
        Microphone.End(null);
        audioSource.Stop();
    }




    /// <summary>
    /// Converts the recording to Byte[] (dead code kept for reference).
    /// </summary>
    /// <returns></returns>
    //public byte[] GetClipData()
    //{
    //    if (audioClip == null)
    //    {
    //        //Debug.LogError("录音数据为空");
    //        Debug.Log("录音数据为空");
    //        return null;
    //    }

    //    float[] samples = new float[audioClip.samples];

    //    audioClip.GetData(samples, 0);



    //    byte[] outData = new byte[samples.Length * 2];

    //    int rescaleFactor = 32767; //to convert float to Int16   

    //    for (int i = 0; i < samples.Length; i++)
    //    {
    //        short temshort = (short)(samples[i] * rescaleFactor);

    //        byte[] temdata = System.BitConverter.GetBytes(temshort);

    //        outData[i * 2] = temdata[0];
    //        outData[i * 2 + 1] = temdata[1];
    //    }
    //    if (outData == null || outData.Length <= 0)
    //    {
    //        //Debug.LogError("录音数据为空");
    //        Debug.Log("录音数据为空");
    //        return null;
    //    }

    //    //return SubByte(outData, 0, audioLength * 8000 * 2);
    //    return outData;
    //}
}

项目失败:Unity3D 在 Windows 和 Ubuntu 平台上并不原生支持 MP3 流的解码。

using NAudio.Wave;

转换只在windows上起效。


所以这份代码只在 Windows 以及移动平台(安卓、iOS)上起效。

2018-03-19 22:08:27 fengmao31 阅读数 177
  using (Stream stream = response.GetResponseStream())
        {
            buffer2 = new byte[stream.Length];
            stream.Read(buffer2, 0, buffer2.Length);

        }


stream.Length失败



解决方案 

1、用unity自带的www类

2、https://bbs.csdn.net/topics/360163784

byte[] result;
byte[] buffer = new byte[4096];
 
WebRequest wr = WebRequest.Create(someUrl);
 
using(WebResponse response = wr.GetResponse())
{
   using(Stream responseStream = response.GetResponseStream())
   {
      using(MemoryStream memoryStream = new MemoryStream())
      {
         int count = 0;
         do
         {
            count = responseStream.Read(buffer, 0, buffer.Length);
            memoryStream.Write(buffer, 0, count);
 
         } while(count != 0);
 
         result = memoryStream.ToArray();
 
      }
   }
}

2018-07-26 01:43:08 luoyikun 阅读数 1308

转自洪流学堂
语音转文字
1.打开麦克风记录

_clipRecord = Microphone.Start(null, false, 30, 16000);

2.将Unity的AudioClip数据转化为PCM格式16bit数据

/// <summary>
        /// Converts Unity AudioClip data into 16-bit PCM format.
        /// </summary>
        /// <param name="clip">Source clip; all channels are read interleaved.</param>
        /// <returns>PCM16 byte array, 2 bytes per sample (native endianness).</returns>
        public static byte[] ConvertAudioClipToPCM16(AudioClip clip)
        {
            var samples = new float[clip.samples * clip.channels];
            clip.GetData(samples, 0);
            var samples_int16 = new short[samples.Length];

            // Scale each [-1, 1] float sample to the signed 16-bit range.
            for (var index = 0; index < samples.Length; index++)
            {
                var f = samples[index];
                samples_int16[index] = (short) (f * short.MaxValue);
            }

            // Copy the raw short values into a byte buffer without per-element conversion.
            var byteArray = new byte[samples_int16.Length * 2];
            Buffer.BlockCopy(samples_int16, 0, byteArray, 0, byteArray.Length);

            return byteArray;
        }

3.将字节流上传到百度语音uri,得到转换后的文本

 /// <summary>
 /// Coroutine: uploads PCM audio (16 kHz, per the Content-Type header below) to the
 /// Baidu ASR endpoint and invokes <paramref name="callback"/> with the parsed response.
 /// Aborts with an error log if the access token could not be fetched or the request fails.
 /// </summary>
 /// <param name="data">Raw PCM audio bytes to recognize.</param>
 /// <param name="callback">Receives the deserialized <see cref="AsrResponse"/> on success.</param>
 public IEnumerator Recognize(byte[] data, Action<AsrResponse> callback)
        {
            // Ensure the OAuth access token has been fetched before calling the API.
            yield return PreAction ();

            if (tokenFetchStatus == Base.TokenFetchStatus.Failed) {
                Debug.LogError("Token fetched failed, please check your APIKey and SecretKey");
                yield break;
            }

            // Device id doubles as Baidu's required `cuid` parameter.
            var uri = string.Format("{0}?lan=zh&cuid={1}&token={2}", UrlAsr, SystemInfo.deviceUniqueIdentifier, Token);

            var form = new WWWForm();
            form.AddBinaryData("audio", data);
            var www = UnityWebRequest.Post(uri, form);
            // Override the multipart content type: Baidu expects the raw audio format here.
            www.SetRequestHeader("Content-Type", "audio/pcm;rate=16000");
            yield return www.SendWebRequest();

            if (string.IsNullOrEmpty(www.error))
            {
                Debug.Log(www.downloadHandler.text);
                callback(JsonUtility.FromJson<AsrResponse>(www.downloadHandler.text));
            }
            else
                Debug.LogError(www.error);
        }

文字转语音
1.文本上传百度语音uri,得到字节流

 /// <summary>
 /// Coroutine: requests Baidu TTS synthesis of <paramref name="text"/> and invokes
 /// <paramref name="callback"/> with a TtsResponse carrying the decoded AudioClip,
 /// or with the error payload when synthesis fails.
 /// </summary>
 /// <param name="text">Text to synthesize.</param>
 /// <param name="callback">Receives the resulting TtsResponse.</param>
 /// <param name="speed">Speech rate, clamped to 0-9.</param>
 /// <param name="pit">Pitch, clamped to 0-9.</param>
 /// <param name="vol">Volume, clamped to 0-15.</param>
 /// <param name="per">Voice/pronouncer selection.</param>
 public IEnumerator Synthesis(string text, Action<TtsResponse> callback, int speed = 5, int pit = 5, int vol = 5,
            Pronouncer per = Pronouncer.Female)
        {
            // Ensure the OAuth access token has been fetched before calling the API.
            yield return PreAction();

            if (tokenFetchStatus == Base.TokenFetchStatus.Failed)
            {
                Debug.LogError("Token was fetched failed. Please check your APIKey and SecretKey")
;
                callback(new TtsResponse()
                {
                    err_no = -1,
                    err_msg = "Token was fetched failed. Please check your APIKey and SecretKey"
                });
                yield break;
            }

            // Query parameters per the Baidu TTS REST API.
            var param = new Dictionary<string, string>();
            param.Add("tex", text);
            param.Add("tok", Token);
            param.Add("cuid", SystemInfo.deviceUniqueIdentifier);
            param.Add("ctp", "1");
            param.Add("lan", "zh");
            param.Add("spd", Mathf.Clamp(speed, 0, 9).ToString());
            param.Add("pit", Mathf.Clamp(pit, 0, 9).ToString());
            param.Add("vol", Mathf.Clamp(vol, 0, 15).ToString());
            param.Add("per", ((int) per).ToString());

            // Build the query string by hand ("?" before the first pair, "&" after).
            string url = UrlTts;
            int i = 0;
            foreach (var p in param)
            {
                url += i != 0 ? "&" : "?";
                url += p.Key + "=" + p.Value;
                i++;
            }

// Desktop/editor/UWP: download raw bytes and decode the MP3 manually;
// other platforms use Unity's built-in audio download handler.
#if UNITY_STANDALONE || UNITY_EDITOR || UNITY_UWP
            var www = UnityWebRequest.Get(url);
#else
            var www = UnityWebRequestMultimedia.GetAudioClip(url, AudioType.MPEG);
#endif
            Debug.Log(www.url);
            yield return www.SendWebRequest();


            if (string.IsNullOrEmpty(www.error))
            {
                // "audio/mp3" means synthesized audio; any other content type is
                // treated as a JSON error payload from Baidu.
                var type = www.GetResponseHeader("Content-Type");
                Debug.Log("response type: " + type);

                if (type == "audio/mp3")
                {
#if UNITY_STANDALONE || UNITY_EDITOR || UNITY_UWP
                    var clip = GetAudioClipFromMP3ByteArray(www.downloadHandler.data);
                    var response = new TtsResponse {clip = clip};
#else
                    var response = new TtsResponse {clip = DownloadHandlerAudioClip.GetContent(www) };
#endif
                    callback(response);
                }
                else
                {
                    Debug.LogError(www.downloadHandler.text);
                    callback(JsonUtility.FromJson<TtsResponse>(www.downloadHandler.text));
                }
            }
            else
                Debug.LogError(www.error);
        }

2.字节流转化为AudioClip播放

/// <summary>
/// Decodes an MP3 byte array with MP3Sharp and builds a single-channel AudioClip
/// from the resulting PCM data.
/// </summary>
/// <param name="mp3Data">Raw MP3 file bytes.</param>
/// <returns>An AudioClip named "testSound" containing the decoded audio.</returns>
private AudioClip GetAudioClipFromMP3ByteArray(byte[] mp3Data)
        {
            var mp3MemoryStream = new MemoryStream(mp3Data);
            MP3Sharp.MP3Stream mp3Stream = new MP3Sharp.MP3Stream(mp3MemoryStream);

            //Get the converted stream data
            MemoryStream convertedAudioStream = new MemoryStream();
            byte[] buffer = new byte[2048];
            int bytesReturned = -1;
            int totalBytesReturned = 0;

            // Drain the decoder until Read reports EOF (0 bytes).
            while (bytesReturned != 0)
            {
                bytesReturned = mp3Stream.Read(buffer, 0, buffer.Length);
                convertedAudioStream.Write(buffer, 0, bytesReturned);
                totalBytesReturned += bytesReturned;
            }

            Debug.Log("MP3 file has " + mp3Stream.ChannelCount + " channels with a frequency of " +
                      mp3Stream.Frequency);

            byte[] convertedAudioData = convertedAudioStream.ToArray();

            //bug of mp3sharp that audio with 1 channel has right channel data, to skip them
            // NOTE(review): this loop steps i by 2 but writes data[i + 1]; if data.Length is
            // odd the final iteration reads/writes out of range — confirm input lengths.
            byte[] data = new byte[convertedAudioData.Length / 2];
            for (int i = 0; i < data.Length; i += 2)
            {
                data[i] = convertedAudioData[2 * i];
                data[i + 1] = convertedAudioData[2 * i + 1];
            }

            Wav wav = new Wav(data, mp3Stream.ChannelCount, mp3Stream.Frequency);

            AudioClip audioClip = AudioClip.Create("testSound", wav.SampleCount, 1, wav.Frequency, false);
            audioClip.SetData(wav.LeftChannel, 0);

            return audioClip;
        }
2017-10-10 13:04:27 dark00800 阅读数 5279

百度AI开放平台是百度推出的一个人工智能服务平台,该平台提供了很多当下热门技术的解决方案,如人脸识别,语音识别,语音智能等。其中人脸识别的SDK支持很多语言,包括Java,PHP,Python,C#,Node.js,Android和iOS等,使用C#进行脚本语言开发的Unity3d自然也可以很方便的使用这些SDK。

1、下载人脸识别SDK

首先我们需要下载最新版的SDK,打开人脸识别SDK下载页面,选择C# SDK下载:
SDK下载

下载解压后得到一个叫aip-csharp-sdk-3.0.0的文件夹,其中AipSdk.dll提供了我们进行人脸识别开发需要的API,AipSdk.XML是对DLL的注释。thirdparty中包含了sdk的第三方依赖,Demo中是一些使用示例,可以进行参考。
文件目录

2、导入SDK

由于SDK已经被打包成DLL文件,所以我们导入使用起来也十分方便,只需将解压后的文件夹直接导入到工程即可。当然为了方便管理我们可以将SDK导入到Plugins文件夹中,需要注意的是一定要将第三方依赖一起导入,不然会报错。
Plugins目录

导入后可能会有如下错误:
.Net报错
这是由于我们在Player设置中选择的Api Compatibility Level是.Net 2.0 Subset,即.Net 2.0的子集,这里需要改成.Net 2.0。选择Edit->Project Settings->Player,在Other Settings中将Api Compatibility Level更改为.Net 2.0:
.Net2.0

3、创建应用

下面将以人脸检测为示例简单介绍一下SDK的使用。
使用SDK前我们需要先注册一个百度账号,然后登陆百度AI开放平台,创建一个人脸识别应用。
选择控制台并登录:
选择控制台

在控制台已开通服务中选择人脸识别:
人脸识别

然后点击创建应用,输入应用名,应用类型,接口选择(默认创建人脸识别的应用),和应用描述,点击立即创建,创建完毕后点击查看应用详情查看API Key和Secret Key(点击显示查看)
创建应用

完成创建

应用详情

之后可以选择监控报表来查看应用接口调用情况:
监控报表

4、接口调用

百度AI开放平台提供了人脸识别C#版的详细技术文档,下面以实例来进行简单的调用说明。
使用人脸检测功能我们添加Baidu.Aip.Face命名空间,定义一个Face变量用于调用API:

using Baidu.Aip.Face;

private Face client;

client = new Face("API Key", "Secret Key")

实例化Face变量时需要填写我们创建的应用的API Key和Secret Key,可以在应用详情中查看。
进行人脸检测时调用FaceDetect方法:

public JObject FaceDetect(byte[] image, Dictionary<string, object> options = null);

该方法需要传入两个参数,被检测的图片和返回的参数配置,其中可选参数options可以使用默认值null,这时只会返回人脸的位置等基本信息。返回值是一个JObject类型,此类型是第三方依赖中提供的一个json类型。详细调用方法为:

byte[] image = File.ReadAllBytes(Application.streamingAssetsPath + "/1.jpg");
Dictionary<string, object> options = new Dictionary<string, object>()
{
    {"face_fields", "beauty,age,expression,gender" }
};
client.FaceDetect(image, options);

options中的face_fields为请求的参数,类型是string,多个项以逗号分开,不能在逗号和后一项之间加空格,否则无效。详细参数如下表:

参数 类型 描述
face_fields string 包括age、beauty、expression、faceshape、gender、gla-sses、landmark、race、qualities信息,逗号分隔,默认只返回人脸框、概率和旋转角度。
max_face_num number 最多处理人脸数目,默认值1
image byte[] 图像数据

方法返回的JObject包含一个或多个人脸的信息,也可以如下调用:

client.FaceDetect(image);

此时将只会返回最基本的信息,包括日志id,人脸数目,人脸位置,人脸置信度,竖直方向转角,三维左右旋转角,三维俯仰角,平面旋转角。
所有返回值见下表:

参数 类型 是否一定输出 描述
log_id number 日志id
result_num number 人脸数目
result array 人脸属性对象的集合
+age number 年龄。face_fields包含age时返回
+beauty number 美丑打分,范围0-1,越大表示越美。face_fields包含beauty时返回
+location array 人脸在图片中的位置
++left number 人脸区域离左边界的距离
++top number 人脸区域离上边界的距离
++width number 人脸区域的宽度
++height number 人脸区域的高度
+face_probability number 人脸置信度,范围0-1
+rotation_angle number 人脸框相对于竖直方向的顺时针旋转角,[-180,180]
+yaw number
+pitch number 三维旋转之俯仰角度[-90(上), 90(下)]
+roll number 平面内旋转角[-180(逆时针), 180(顺时针)]
+expression number 表情,0,不笑;1,微笑;2,大笑。face_fields包含expression时返回
+expression_probability number 表情置信度,范围0~1。face_fields包含expression时返回
+faceshape array 脸型置信度。face_fields包含faceshape时返回
++type string 脸型:square/triangle/oval/heart/round
++probability number 置信度:0~1
+gender string male、female。face_fields包含gender时返回
+gender_probability number 性别置信度,范围0~1。face_fields包含gender时返回
+glasses number 是否带眼镜,0-无眼镜,1-普通眼镜,2-墨镜。face_fields包含glasses时返回
+glasses_probability number 眼镜置信度,范围0~1。face_fields包含glasses时返回
+landmark array 4个关键点位置,左眼中心、右眼中心、鼻尖、嘴中心。face_fields包含landmark时返回
++x number x坐标
++y number y坐标
+landmark72 array 72个特征点位置,示例图 。face_fields包含landmark时返回
++x number x坐标
++y number y坐标
+race string yellow、white、black、arabs。face_fields包含race时返回
+race_probability number 人种置信度,范围0~1。face_fields包含race时返回
+qualities array 人脸质量信息。face_fields包含qualities时返回
++occlusion array 人脸各部分遮挡的概率, [0, 1] (待上线)
+++left_eye number 左眼
+++right_eye number 右眼
+++nose number 鼻子
+++mouth number
+++left_cheek number 左脸颊
+++right_cheek number 右脸颊
+++chin number 下巴
++type array 真实人脸/卡通人脸置信度
+++human number 真实人脸置信度,[0, 1]
+++cartoon number 卡通人脸置信度,[0, 1]

运行时可能会报错:
运行报错
这是网页端身份安全验证失败导致的,我们需要在程序运行时手动添加安全证书,在Awake方法中加入:

System.Net.ServicePointManager.ServerCertificateValidationCallback +=
               delegate (object sender, System.Security.Cryptography.X509Certificates.X509Certificate certificate,
                           System.Security.Cryptography.X509Certificates.X509Chain chain,
                           System.Net.Security.SslPolicyErrors sslPolicyErrors)
                {
                   return true; // **** Always accept
                };

我们可以用Text控件来查看具体的返回值:
TestFace.cs:

using System.Collections.Generic;
using System.IO;
using Baidu.Aip.Face;
using UnityEngine;
using UnityEngine.UI;

public class TestFace : MonoBehaviour
{

    public Text debugInfo;

    private Face client;
    private byte[] image;
    private Dictionary<string, object> options;

    private void Awake()
    {
        System.Net.ServicePointManager.ServerCertificateValidationCallback +=
               delegate (object sender, System.Security.Cryptography.X509Certificates.X509Certificate certificate,
                           System.Security.Cryptography.X509Certificates.X509Chain chain,
                           System.Net.Security.SslPolicyErrors sslPolicyErrors)
               {
                   return true; // **** Always accept
            };

        client = new Face("API Key", "Secret Key");
        image = File.ReadAllBytes(Application.streamingAssetsPath + "/1.jpg");
        options = new Dictionary<string, object>()
        {
            {"face_fields", "beauty,age,expression,gender" }
        };
    }

    public void StartDetect()
    {
        var result = client.FaceDetect(image);//, options);
        debugInfo.text = result.ToString();
    }
}

运行结果:
运行结果

By:蒋志杰

没有更多推荐了,返回首页