Web-Service-Based Intelligent Voice Dialogue


Building on the previous posts, we already have speech recognition, speech synthesis, and an intelligent chatbot. So can we now build a robot we can talk to in real time?

Of course we can!

I. Web-Based Intelligent Dialogue

Prerequisite: you should already be familiar with Flask and WebSocket.

1. Create the Flask project

#!/usr/bin/env python
# -*- coding:utf8 -*-

import os
from flask import Flask, render_template, send_file


app = Flask(__name__)


@app.route("/index")
def index():
    return render_template("index1.html")


@app.route("/get_audio/<file_path>/<file_name>")
def get_audio(file_path, file_name):
    new_file_path = os.path.join(file_path, file_name)
    return send_file(new_file_path)


if __name__ == __main__:
    app.run("127.0.0.1", 8000, debug=True)
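
Before copying the front-end files over, it helps to have the directory layout this post expects. The sketch below is an assumption pieced together from the paths used in the code (Flask's default templates/ and static/ folders, plus the question_audio_pcm folder that the WebSocket service in step 2 writes into):

# Assumed project layout (inferred from the code in this post, not prescribed by it):
#
#   app.py                  # the Flask app above
#   ws.py                   # the WebSocket service from step 2
#   templates/index.html    # the page below
#   static/Recorder.js      # the recording helper below
#   question_audio_pcm/     # temporary question WAVs written by the WebSocket service
import os

for folder in ("templates", "static", "question_audio_pcm"):
    os.makedirs(folder, exist_ok=True)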

index.html — just copy it into the templates folder when you use it:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>

</head>
<body>
<audio src="" autoplay controls id="player"></audio>  <!-- once src is set, the autoplay attribute makes it play automatically -->

<button onclick="start_reco()">Record message</button>
<br>
<button onclick="stop_reco()">Send voice message</button>

</body>
<script src="../static/Recorder.js"></script>
<script type="application/javascript">
    var serv = "http://127.0.0.1:8000";
    var ws_serv = "ws://127.0.0.1:8080/ws";

    var get_music = serv + "/get_audio/";
    var ws = new WebSocket(ws_serv);
    ws.onmessage = function (data) {  // receive the path of the generated answer audio from the back end, request that file, and it plays automatically
        {#console.log(data.data);#}
        document.getElementById("player").src = get_music + data.data
    };
    var reco = null;
    var audio_context = new AudioContext();  // the browser's audio-processing context
    navigator.getUserMedia = (navigator.getUserMedia ||
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.msGetUserMedia);

    navigator.getUserMedia({audio: true}, create_stream, function (err) {
        console.log(err)
    });

    function create_stream(user_media) {
        var stream_input = audio_context.createMediaStreamSource(user_media);  // wrap the microphone stream in a source node the Recorder can read from
        reco = new Recorder(stream_input);
    }

    function start_reco() {
        reco.record();  // start buffering the raw audio data
    }

    function stop_reco() {
        reco.stop();  // stop buffering
        get_audio();
        reco.clear();  // empty the buffer so it is ready for the next recording
    }

    function get_audio() {  // export the recording as a WAV file
        reco.exportWAV(function (wav_file) {
            // wav_file is a Blob
            ws.send(wav_file);  // send it to the back end over the WebSocket
        })
    }
    

</script>
</html>

The page depends on the Recorder.js file referenced above, which buffers the raw audio bytes in a "stream" container and can export them as a WAV file; just copy it into the static folder when you use it.

(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.Recorder = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
"use strict";

module.exports = require("./recorder").Recorder;

},{"./recorder":2}],2:[function(require,module,exports){
'use strict';

var _createClass = (function () {
    function defineProperties(target, props) {
        for (var i = 0; i < props.length; i++) {
            var descriptor = props[i];descriptor.enumerable = descriptor.enumerable || false;descriptor.configurable = true;if ("value" in descriptor) descriptor.writable = true;Object.defineProperty(target, descriptor.key, descriptor);
        }
    }return function (Constructor, protoProps, staticProps) {
        if (protoProps) defineProperties(Constructor.prototype, protoProps);if (staticProps) defineProperties(Constructor, staticProps);return Constructor;
    };
})();

Object.defineProperty(exports, "__esModule", {
    value: true
});
exports.Recorder = undefined;

var _inlineWorker = require('inline-worker');

var _inlineWorker2 = _interopRequireDefault(_inlineWorker);

function _interopRequireDefault(obj) {
    return obj && obj.__esModule ? obj : { default: obj };
}

function _classCallCheck(instance, Constructor) {
    if (!(instance instanceof Constructor)) {
        throw new TypeError("Cannot call a class as a function");
    }
}

var Recorder = exports.Recorder = (function () {
    function Recorder(source, cfg) {
        var _this = this;

        _classCallCheck(this, Recorder);

        this.config = {
            bufferLen: 4096,
            numChannels: 2,
            mimeType: 'audio_pcm/wav'
        };
        this.recording = false;
        this.callbacks = {
            getBuffer: [],
            exportWAV: []
        };

        Object.assign(this.config, cfg);
        this.context = source.context;
        this.node = (this.context.createScriptProcessor || this.context.createJavaScriptNode).call(this.context, this.config.bufferLen, this.config.numChannels, this.config.numChannels);

        this.node.onaudioprocess = function (e) {
            if (!_this.recording) return;

            var buffer = [];
            for (var channel = 0; channel < _this.config.numChannels; channel++) {
                buffer.push(e.inputBuffer.getChannelData(channel));
            }
            _this.worker.postMessage({
                command: 'record',
                buffer: buffer
            });
        };

        source.connect(this.node);
        this.node.connect(this.context.destination); //this should not be necessary

        var self = {};
        this.worker = new _inlineWorker2.default(function () {
            var recLength = 0,
                recBuffers = [],
                sampleRate = undefined,
                numChannels = undefined;

            self.onmessage = function (e) {
                switch (e.data.command) {
                    case 'init':
                        init(e.data.config);
                        break;
                    case 'record':
                        record(e.data.buffer);
                        break;
                    case 'exportWAV':
                        exportWAV(e.data.type);
                        break;
                    case 'getBuffer':
                        getBuffer();
                        break;
                    case 'clear':
                        clear();
                        break;
                }
            };

            function init(config) {
                sampleRate = config.sampleRate;
                numChannels = config.numChannels;
                initBuffers();
            }

            function record(inputBuffer) {
                for (var channel = 0; channel < numChannels; channel++) {
                    recBuffers[channel].push(inputBuffer[channel]);
                }
                recLength += inputBuffer[0].length;
            }

            function exportWAV(type) {
                var buffers = [];
                for (var channel = 0; channel < numChannels; channel++) {
                    buffers.push(mergeBuffers(recBuffers[channel], recLength));
                }
                var interleaved = undefined;
                if (numChannels === 2) {
                    interleaved = interleave(buffers[0], buffers[1]);
                } else {
                    interleaved = buffers[0];
                }
                var dataview = encodeWAV(interleaved);
                var audioBlob = new Blob([dataview], { type: type });

                self.postMessage({ command: 'exportWAV', data: audioBlob });
            }

            function getBuffer() {
                var buffers = [];
                for (var channel = 0; channel < numChannels; channel++) {
                    buffers.push(mergeBuffers(recBuffers[channel], recLength));
                }
                self.postMessage({ command: 'getBuffer', data: buffers });
            }

            function clear() {
                recLength = 0;
                recBuffers = [];
                initBuffers();
            }

            function initBuffers() {
                for (var channel = 0; channel < numChannels; channel++) {
                    recBuffers[channel] = [];
                }
            }

            function mergeBuffers(recBuffers, recLength) {
                var result = new Float32Array(recLength);
                var offset = 0;
                for (var i = 0; i < recBuffers.length; i++) {
                    result.set(recBuffers[i], offset);
                    offset += recBuffers[i].length;
                }
                return result;
            }

            function interleave(inputL, inputR) {
                var length = inputL.length + inputR.length;
                var result = new Float32Array(length);

                var index = 0,
                    inputIndex = 0;

                while (index < length) {
                    result[index++] = inputL[inputIndex];
                    result[index++] = inputR[inputIndex];
                    inputIndex++;
                }
                return result;
            }

            function floatTo16BitPCM(output, offset, input) {
                for (var i = 0; i < input.length; i++, offset += 2) {
                    var s = Math.max(-1, Math.min(1, input[i]));
                    output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
                }
            }

            function writeString(view, offset, string) {
                for (var i = 0; i < string.length; i++) {
                    view.setUint8(offset + i, string.charCodeAt(i));
                }
            }

            function encodeWAV(samples) {
                var buffer = new ArrayBuffer(44 + samples.length * 2);
                var view = new DataView(buffer);

                /* RIFF identifier */
                writeString(view, 0, 'RIFF');
                /* RIFF chunk length */
                view.setUint32(4, 36 + samples.length * 2, true);
                /* RIFF type */
                writeString(view, 8, 'WAVE');
                /* format chunk identifier */
                writeString(view, 12, 'fmt ');
                /* format chunk length */
                view.setUint32(16, 16, true);
                /* sample format (raw) */
                view.setUint16(20, 1, true);
                /* channel count */
                view.setUint16(22, numChannels, true);
                /* sample rate */
                view.setUint32(24, sampleRate, true);
                /* byte rate (sample rate * block align) */
                view.setUint32(28, sampleRate * 4, true);
                /* block align (channel count * bytes per sample) */
                view.setUint16(32, numChannels * 2, true);
                /* bits per sample */
                view.setUint16(34, 16, true);
                /* data chunk identifier */
                writeString(view, 36, 'data');
                /* data chunk length */
                view.setUint32(40, samples.length * 2, true);

                floatTo16BitPCM(view, 44, samples);

                return view;
            }
        }, self);

        this.worker.postMessage({
            command: 'init',
            config: {
                sampleRate: this.context.sampleRate,
                numChannels: this.config.numChannels
            }
        });

        this.worker.onmessage = function (e) {
            var cb = _this.callbacks[e.data.command].pop();
            if (typeof cb == 'function') {
                cb(e.data.data);
            }
        };
    }

    _createClass(Recorder, [{
        key: 'record',
        value: function record() {
            this.recording = true;
        }
    }, {
        key: 'stop',
        value: function stop() {
            this.recording = false;
        }
    }, {
        key: 'clear',
        value: function clear() {
            this.worker.postMessage({ command: 'clear' });
        }
    }, {
        key: 'getBuffer',
        value: function getBuffer(cb) {
            cb = cb || this.config.callback;
            if (!cb) throw new Error('Callback not set');

            this.callbacks.getBuffer.push(cb);

            this.worker.postMessage({ command: 'getBuffer' });
        }
    }, {
        key: 'exportWAV',
        value: function exportWAV(cb, mimeType) {
            mimeType = mimeType || this.config.mimeType;
            cb = cb || this.config.callback;
            if (!cb) throw new Error('Callback not set');

            this.callbacks.exportWAV.push(cb);

            this.worker.postMessage({
                command: 'exportWAV',
                type: mimeType
            });
        }
    }], [{
        key: 'forceDownload',
        value: function forceDownload(blob, filename) {
            var url = (window.URL || window.webkitURL).createObjectURL(blob);
            var link = window.document.createElement('a');
            link.href = url;
            link.download = filename || 'output.wav';
            var click = document.createEvent("Event");
            click.initEvent("click", true, true);
            link.dispatchEvent(click);
        }
    }]);

    return Recorder;
})();

exports.default = Recorder;

},{"inline-worker":3}],3:[function(require,module,exports){
"use strict";

module.exports = require("./inline-worker");
},{"./inline-worker":4}],4:[function(require,module,exports){
(function (global){
"use strict";

var _createClass = (function () { function defineProperties(target, props) { for (var key in props) { var prop = props[key]; prop.configurable = true; if (prop.value) prop.writable = true; } Object.defineProperties(target, props); } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();

var _classCallCheck = function (instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } };

var WORKER_ENABLED = !!(global === global.window && global.URL && global.Blob && global.Worker);

var InlineWorker = (function () {
  function InlineWorker(func, self) {
    var _this = this;

    _classCallCheck(this, InlineWorker);

    if (WORKER_ENABLED) {
      var functionBody = func.toString().trim().match(/^function\s*\w*\s*\([\w\s,]*\)\s*{([\w\W]*?)}$/)[1];
      var url = global.URL.createObjectURL(new global.Blob([functionBody], { type: "text/javascript" }));

      return new global.Worker(url);
    }

    this.self = self;
    this.self.postMessage = function (data) {
      setTimeout(function () {
        _this.onmessage({ data: data });
      }, 0);
    };

    setTimeout(function () {
      func.call(self);
    }, 0);
  }

  _createClass(InlineWorker, {
    postMessage: {
      value: function postMessage(data) {
        var _this = this;

        setTimeout(function () {
          _this.self.onmessage({ data: data });
        }, 0);
      }
    }
  });

  return InlineWorker;
})();

module.exports = InlineWorker;
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}]},{},[1])(1)
});
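
A note on what exportWAV produces: encodeWAV above writes a standard 44-byte RIFF/WAVE header followed by 16-bit PCM samples, so the file the back end saves can be inspected with Python's standard wave module. A quick check (the file path here is just an example):

import wave

# inspect a WAV file saved by the WebSocket service (example path)
with wave.open("question_audio_pcm/example.wav", "rb") as w:
    print(w.getnchannels())      # usually 2 (Recorder.js defaults to numChannels: 2)
    print(w.getsampwidth() * 8)  # 16 bits per sample (see floatTo16BitPCM)
    print(w.getframerate())      # the browser AudioContext's sample rate, e.g. 44100 or 48000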

2. Wire the index page to a WebSocket service for real-time dialogue

#!/usr/bin/env python
# -*- coding:utf8 -*-

import uuid
import os
from flask_ai_demo.nip_demo import get_ret_file
from flask import Flask, request
from geventwebsocket.websocket import WebSocket
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler


app = Flask(__name__)


@app.route("/ws")
def ws():
    user_socket = request.environ.get("wsgi.websocket")  # type:WebSocket
    if user_socket:
        while True:
            try:
                msg = user_socket.receive()
                # q_file_name = f"{uuid.uuid4()}.wav"
                q_file_path = os.path.join("question_audio_pcm", f"{uuid.uuid4()}.wav")
                with open(q_file_path, "wb") as f:
                    f.write(msg)
                ret_file_name = get_ret_file(q_file_path)
                user_socket.send(ret_file_name)  # 把生成好的语音文件名称发给前端,让前端请求获取,播放
                os.remove(q_file_path)  # 删除生成的语音文件
            except Exception as e:
                continue


if __name__ == __main__:
    http_server = WSGIServer(("127.0.0.1", 8080), app, handler_class=WebSocketHandler)
    http_server.serve_forever()
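
For reference, get_ret_file is the glue function from the earlier posts in this series (speech recognition, chatbot, speech synthesis); the real implementation lives in flask_ai_demo/nip_demo.py. The sketch below only shows the expected shape — asr_to_text, ask_bot, text_to_audio and the answer_audio folder are hypothetical placeholders, not real APIs:

#!/usr/bin/env python
# -*- coding:utf8 -*-
# Hypothetical sketch of get_ret_file -- the three helpers below are placeholders
# for the ASR / chatbot / TTS code built in the earlier posts.
import os
import uuid


def asr_to_text(wav_path):      # placeholder: speech recognition, WAV -> question text
    raise NotImplementedError


def ask_bot(question_text):     # placeholder: chatbot, question text -> answer text
    raise NotImplementedError


def text_to_audio(text, path):  # placeholder: speech synthesis, text -> audio file on disk
    raise NotImplementedError


def get_ret_file(q_file_path):
    question = asr_to_text(q_file_path)
    answer = ask_bot(question)
    a_file_name = f"{uuid.uuid4()}.mp3"
    text_to_audio(answer, os.path.join("answer_audio", a_file_name))
    # The front end appends this value to /get_audio/, so return "folder/file_name".
    return f"answer_audio/{a_file_name}"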

Once both servers are running (the Flask app on port 8000 and the WebSocket service on port 8080), open http://127.0.0.1:8000/index and go chat with your cute little robot!
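
If you want to sanity-check the WebSocket service without a browser or microphone, a small test client can push a pre-recorded file through the same protocol. This assumes the third-party websocket-client package (pip install websocket-client) and an existing question.wav on disk:

import websocket  # pip install websocket-client

ws = websocket.create_connection("ws://127.0.0.1:8080/ws")
with open("question.wav", "rb") as f:
    ws.send_binary(f.read())   # the same binary frame that Recorder.js sends from the browser
print(ws.recv())               # prints the "folder/file_name" of the generated answer audio
ws.close()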

 
