Converting a Binary Stream to Sound


Answer A:

// Create the audio context and a WebSocket that streams raw PCM audio
this.contextAudio = new AudioContext()
this.websocket2 = new WebSocket('ws://' + process.env.LISTEN_API)
this.websocket2.binaryType = 'arraybuffer'
const that = this
this.websocket2.onopen = function () {
  console.log('websocket stream -- open', openMessage)
  that.websocket2.send(openMessage)
}
this.websocket2.onmessage = function (message) {
  that.handleBuffer(message)
}
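Note that openMessage is never defined in this answer; it is presumably a handshake payload telling the server which stream to push. A purely hypothetical placeholder:

// Hypothetical shape -- the real payload depends on the server's protocol
const openMessage = JSON.stringify({ action: 'listen', deviceId: 1 })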

      

handleBuffer(message) {
      let buffer = message.data
      let numberOfChannels
      let sampleRate
      let segment
      // Control frames (channel-status and range-adjustment notices) are exactly 4 bytes
      if (buffer.byteLength === 4) {
        const msgView = new DataView(buffer)
        const msgRate = msgView.getUint32(0, true)
        if (msgRate === 0) { // channel full
          this.$message.error('The listening channel is full, please try again later!')
          this.contextAudioStop() // stop playback
        } else { // range adjustment
          this.inputLength = msgRate * 10
        }
        return false
      }
      const dataView = new DataView(buffer)
      // Custom framing: the first four bytes carry the sample rate (not a standard WAV header)
      sampleRate = dataView.getUint32(0, true)
      numberOfChannels = 1
      buffer = buffer.slice(4) // strip the custom 4-byte header
      segment = {}
      const that = this
      // Decode: wrap the raw PCM in a WAV header, then ArrayBuffer => AudioBuffer
      this.contextAudio.decodeAudioData(this.wavify(buffer, numberOfChannels, sampleRate)).then((audioBuffer) => {
        segment.buffer = audioBuffer
        that.audioStack.push(segment)
        that.decodeAudioTimeout = setTimeout(() => {
          that.scheduleBuffers()
        }, 50)
      })
    },
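For reference, a 4-byte control frame is just a little-endian uint32: four zero bytes mean "channel full", and any other value is a rate that gets scaled by 10.

// Hex bytes 10 00 00 00 read as a little-endian uint32 give 16,
// so inputLength would become 160
const frame = new Uint8Array([0x10, 0x00, 0x00, 0x00]).buffer
new DataView(frame).getUint32(0, true) // => 16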

scheduleBuffers() {
      let nextTime = 0
      clearTimeout(this.decodeAudioTimeout)
      this.decodeAudioTimeout = null
      clearTimeout(this.scheduleBuffersTimeout)
      this.scheduleBuffersTimeout = null
      while (this.audioStack.length > 0 && this.audioStack[0].buffer !== undefined) {
        const currentTime = this.contextAudio.currentTime
        const source = this.contextAudio.createBufferSource()
        let segment = this.audioStack.shift()
        if (this.audioStack.length > 1) {
          segment = this.audioStack.shift() // queue is backing up: drop a segment to catch up
        }
        source.loop = true // bounded by the explicit stop() below, so each segment still plays once
        source.buffer = segment.buffer
        source.connect(this.contextAudio.destination)
        if (nextTime === 0) {
          nextTime = currentTime + 0.3 // small initial delay to absorb network jitter
        }
        const duration = source.buffer.duration
        if (currentTime > nextTime) {
          nextTime = currentTime // we fell behind; restart from "now"
        }
        source.start(nextTime, 0)
        source.stop(nextTime + duration)
        nextTime += duration // chain the next segment right after this one
      }
      const that = this
      this.scheduleBuffersTimeout = setTimeout(() => {
        that.scheduleBuffers()
      }, 50)
    },
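To make the timing math concrete, here is a tiny standalone simulation of the loop above with hypothetical numbers: three 0.5 s segments, with currentTime frozen at 1.0.

// Simulates scheduleBuffers' timing math (hypothetical numbers)
let nextTime = 0
const currentTime = 1.0
for (const duration of [0.5, 0.5, 0.5]) {
  if (nextTime === 0) nextTime = currentTime + 0.3 // initial 0.3 s priming delay
  if (currentTime > nextTime) nextTime = currentTime
  console.log('start', nextTime, 'stop', nextTime + duration)
  nextTime += duration
}
// Prints: start 1.3 stop 1.8 / start 1.8 stop 2.3 / start 2.3 stop 2.8
// Segments play back to back after a single 0.3 s priming delay.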

concat(buffer1, buffer2) {
      // Join two ArrayBuffers into one (here: WAV header + PCM body)
      const tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength)
      tmp.set(new Uint8Array(buffer1), 0)
      tmp.set(new Uint8Array(buffer2), buffer1.byteLength)
      return tmp.buffer
    },

wavify(data, numberOfChannels, sampleRate) {
      // Prepend a 44-byte PCM WAV header so decodeAudioData can parse the raw samples
      const header = new ArrayBuffer(44)
      const d = new DataView(header)
      d.setUint8(0, 'R'.charCodeAt(0))
      d.setUint8(1, 'I'.charCodeAt(0))
      d.setUint8(2, 'F'.charCodeAt(0))
      d.setUint8(3, 'F'.charCodeAt(0))
      d.setUint32(4, data.byteLength + 36, true) // RIFF chunk size: file length minus the 8-byte RIFF preamble
      d.setUint8(8, 'W'.charCodeAt(0))
      d.setUint8(9, 'A'.charCodeAt(0))
      d.setUint8(10, 'V'.charCodeAt(0))
      d.setUint8(11, 'E'.charCodeAt(0))
      d.setUint8(12, 'f'.charCodeAt(0))
      d.setUint8(13, 'm'.charCodeAt(0))
      d.setUint8(14, 't'.charCodeAt(0))
      d.setUint8(15, ' '.charCodeAt(0))
      d.setUint32(16, 16, true) // fmt chunk size
      d.setUint16(20, 1, true) // audio format: 1 = PCM
      d.setUint16(22, numberOfChannels, true) // channel count
      d.setUint32(24, sampleRate, true) // sample rate
      d.setUint32(28, sampleRate * numberOfChannels * 2, true) // byte rate (little-endian, like every other field)
      d.setUint16(32, numberOfChannels * 2, true) // block align
      d.setUint16(34, 16, true) // bits per sample
      d.setUint8(36, 'd'.charCodeAt(0))
      d.setUint8(37, 'a'.charCodeAt(0))
      d.setUint8(38, 't'.charCodeAt(0))
      d.setUint8(39, 'a'.charCodeAt(0))
      d.setUint32(40, data.byteLength, true) // data chunk size
      return this.concat(header, data)
    },
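handleBuffer also calls this.contextAudioStop(), which the answer never shows. A minimal sketch of what it plausibly does, assuming the component only needs to cancel the timers, close the socket, and release the AudioContext (names mirror the code above):

contextAudioStop() {
  // Assumed teardown -- not part of the original answer
  clearTimeout(this.decodeAudioTimeout)
  clearTimeout(this.scheduleBuffersTimeout)
  this.decodeAudioTimeout = null
  this.scheduleBuffersTimeout = null
  this.audioStack = []
  if (this.websocket2) this.websocket2.close() // stop the incoming stream
  if (this.contextAudio) this.contextAudio.close() // release the audio output
}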

In Java, convert a file into a binary stream, then in .NET convert the binary stream back into a file.......

Answer A: Method 1: write a dedicated function that reads the data out of the database and saves it to disk as a file (e.g. test.jpg), then display it in the HTML with <img src="test.jpg"/>
----------------------------------------------------------------------
Method 2: write a servlet.

public class GetPhotoAction extends Action {
    // Output type: an image
    private static final String CONTENT_TYPE = "image/gif; charset=GBK";

    public ActionForward execute(ActionMapping actionMapping,
                                 ActionForm actionForm,
                                 HttpServletRequest request,
                                 HttpServletResponse response) {
        int empid;
        try {
            empid = Integer.parseInt(request.getParameter("empid")); // employee id
        } catch (NumberFormatException e) {
            empid = 0;
        }
        response.setContentType(CONTENT_TYPE); // set the output type
        OutputStream out = null;
        try {
            out = response.getOutputStream(); // get the output stream
        } catch (IOException ex1) {
            ex1.printStackTrace(System.out);
        }
        try {
            byte[] photo = Operator.getDBPhoto(empid); // read the image bytes from the database
            if (photo != null && photo.length > 0) {
                out.write(photo); // write them to the page
            }
        } catch (Exception ex) {
            ex.printStackTrace(System.out);
        }
        return null;
    }
}

I wrote this as a Struts Action; if you are using jsp + servlet, just turn the Action into a Servlet.
Because CONTENT_TYPE = "image/gif; charset=GBK", visiting this servlet directly in the browser only shows one large image; even if you add out.write("aaaaaaaaa") you will not see any text output. So call it from a JSP instead:
<img src="getPhotoAction.do?empid=123" alt="照片" width="185" height="218">
