Is it possible to mix multiple audio files on top of each other, preferably with JavaScript?
Posted: 2016-11-13 02:58:58
【Question】: I want to merge audio clips, layering them on top of each other so that they play back in sync and are saved to a new audio file. Any help would be much appreciated. I've done some digging online, but couldn't find a definitive answer as to whether any of the tools available in JavaScript audio-editing libraries (e.g. Mix.js) are capable of this.
【Comments】:
Is this a one-off job, or does it need to be done programmatically? And why preferably JS?
Also, if the answers to the questions above are "yes" and "no", then just use a DAW; there are plenty of freeware and open-source options, and for what you want to do they are very simple with a low learning curve.
Once you've answered the above: is this in the browser or in Node.js?
The developer I plan to hand this to is most comfortable with JavaScript, but that is no longer a priority. It does need to be done programmatically. Doing it on the front end would be ideal, but I understand the solution may have to live on the back end.
【Answer 1】: Yes, it is possible, using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files and the Web Audio API.
You can use fetch() or XMLHttpRequest() to retrieve each audio resource as an ArrayBuffer, and AudioContext.decodeAudioData() to create an AudioBufferSourceNode from the response; OfflineAudioContext() to render the merged audio; AudioContext, AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination() and MediaRecorder() to record the stream; and Promise.all(), the Promise() constructor and .then() to handle the asynchronous fetch() and AudioContext.decodeAudioData() calls and to pass the resulting mixed-audio Blob along at the MediaRecorder stop event.
Connect each AudioBufferSourceNode to OfflineAudioContext.destination and call .start() on each node; call OfflineAudioContext.startRendering(); create a new AudioContext buffer source node and connect the renderedBuffer to it; call .createMediaStreamDestination() on the AudioContext to create a MediaStream from the merged audio buffer and pass its .stream to MediaRecorder(); then, at the MediaRecorder stop event, create a Blob URL for the Blob of the recorded audio mix with URL.createObjectURL(), which can be downloaded using an <a> element with a download attribute and its href set to the Blob URL.
var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
+ "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
, "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
+ "Micronesia_National_Anthem.ogg"];
var description = "HiddenTribeAnthem";
var context;
var recorder;
var div = document.querySelector("div");
var duration = 60000;
var chunks = [];
var audio = new AudioContext();
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";
function get(src)
return fetch(src)
.then(function(response)
return response.arrayBuffer()
)
function stopMix(duration, ...media)
setTimeout(function(media)
media.forEach(function(node)
node.stop()
)
, duration, media)
Promise.all(sources.map(get)).then(function(data)
var len = Math.max.apply(Math, data.map(function(buffer)
return buffer.byteLength
));
context = new OfflineAudioContext(2, len, 44100);
return Promise.all(data.map(function(buffer)
return audio.decodeAudioData(buffer)
.then(function(bufferSource)
var source = context.createBufferSource();
source.buffer = bufferSource;
source.connect(context.destination);
return source.start()
)
))
.then(function()
return context.startRendering()
)
.then(function(renderedBuffer)
return new Promise(function(resolve)
var mix = audio.createBufferSource();
mix.buffer = renderedBuffer;
mix.connect(audio.destination);
mix.connect(mixedAudio);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
mix.start(0);
div.innerhtml = "playing and recording tracks..";
// stop playback and recorder in 60 seconds
stopMix(duration, mix, recorder)
recorder.ondataavailable = function(event)
chunks.push(event.data);
;
recorder.onstop = function(event)
var blob = new Blob(chunks,
"type": "audio/ogg; codecs=opus"
);
console.log("recording complete");
resolve(blob)
;
)
)
.then(function(blob)
console.log(blob);
div.innerHTML = "mixed audio tracks ready for download..";
var audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
document.body.appendChild(a);
a.insertAdjacentHTML("afterend", "<br>");
player.src = audioDownload;
document.body.appendChild(player);
)
)
.catch(function(e)
console.log(e)
);
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div>loading audio tracks.. please wait</div>
</body>
</html>
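Note that the second argument to OfflineAudioContext() is a length in sample frames; the example above approximates it with the byteLength of the compressed responses. As a rough sketch of an alternative (assuming the same sources, audio and get() from above), you can decode first and size the offline context from the longest decoded AudioBuffer:

// Sketch: size the OfflineAudioContext by decoded length in sample frames.
// Assumes the `sources`, `audio` and `get()` from the example above.
Promise.all(sources.map(get))
  .then(function(data) {
    // decode every ArrayBuffer into an AudioBuffer first
    return Promise.all(data.map(function(buffer) {
      return audio.decodeAudioData(buffer);
    }));
  })
  .then(function(decoded) {
    // longest track, measured in sample frames
    var frames = Math.max.apply(Math, decoded.map(function(buffer) {
      return buffer.length;
    }));
    var offline = new OfflineAudioContext(2, frames, audio.sampleRate);
    decoded.forEach(function(buffer) {
      var source = offline.createBufferSource();
      source.buffer = buffer;
      source.connect(offline.destination);
      source.start();
    });
    return offline.startRendering();
  })
  .then(function(renderedBuffer) {
    // renderedBuffer is an AudioBuffer holding the full mix; it can be played
    // back and recorded with MediaRecorder exactly as in the example above
    console.log("rendered " + renderedBuffer.duration + " seconds of mixed audio");
  });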
You can also use AudioContext.createChannelMerger() and AudioContext.createChannelSplitter() to combine the tracks:
var sources = ["/path/to/audoi1", "/path/to/audio2"];
var description = "mix";
var chunks = [];
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
var mixedAudio = audio.createMediaStreamDestination();
var duration = 60000;
var context;
var recorder;
var audioDownload;
player.controls = "controls";
function get(src)
return fetch(src)
.then(function(response)
return response.arrayBuffer()
)
function stopMix(duration, ...media)
setTimeout(function(media)
media.forEach(function(node)
node.stop()
)
, duration, media)
Promise.all(sources.map(get)).then(function(data)
return Promise.all(data.map(function(buffer, index)
return audio.decodeAudioData(buffer)
.then(function(bufferSource)
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
)
))
.then(function(audionodes)
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
audionodes.forEach(function(node)
node.start(0)
);
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event)
chunks.push(event.data);
;
recorder.onstop = function(event)
var blob = new Blob(chunks,
"type": "audio/ogg; codecs=opus"
);
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
;
)
)
.catch(function(e)
console.log(e)
);
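If you also want to control how loud each clip ends up in the mix, one option is a GainNode per track; the sketch below assumes a hypothetical decoded array of AudioBuffers, as produced by audio.decodeAudioData() above, and uses illustrative gain values:

// Sketch: per-track GainNodes to balance the mix.
// `decoded` is a hypothetical array of AudioBuffers from audio.decodeAudioData().
var destination = audio.createMediaStreamDestination();
var gains = [0.8, 0.5]; // illustrative per-track levels
decoded.forEach(function(buffer, index) {
  var source = audio.createBufferSource();
  var gain = audio.createGain();
  source.buffer = buffer;
  gain.gain.value = gains[index];
  source.connect(gain);
  gain.connect(destination);       // feeds the recorded MediaStream
  gain.connect(audio.destination); // audible playback
  source.start(0);
});
// destination.stream can then be passed to MediaRecorder() as in the examples above.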
【Discussion】:
Hi, I have an Angular project with TypeScript and it says: Property 'createMediaStreamDestination' does not exist on type 'AudioContext'.
@oihi08 Which browsers did you try the code in when you got that error?
The error shows up when I try to compile the code, but magically it works now... thanks!
Sorry, it's back again. When I try to compile I get: ERROR in C:/Users/sm133/Documents/PROYECTOS/panelDeGestion/panelDeMedios/src/app/pages/login/login.component.ts (58,30): Property 'createMediaStreamDestination' does not exist on type 'AudioContext'.
IE does not support .createMediaStreamDestination()
developer.mozilla.org/en-US/docs/Web/API/AudioContext/…