Detecting when a shader has finished mixing audio
With the code below I can mix several tracks using a shader written in Pixel Bender. The problem is that I don't know when the mixing is finished, or when all of the sounds have reached their end, so that I can save the ByteArray to a file. Is there an event or something like that?
Any help?
package
{
    import flash.display.*;
    import flash.media.*;
    import flash.events.*;
    import flash.net.*;
    import flash.utils.*;
    import fl.controls.Slider;
    import org.bytearray.micrecorder.encoder.WaveEncoder;

    [SWF(width='500', height='380', frameRate='24')]
    public class AudioMixer extends Sprite {

        [Embed(source = "sound2.mp3")] private var Track1:Class;
        [Embed(source = "sound1.mp3")] private var Track2:Class;
        [Embed(source = "mix.pbj", mimeType = "application/octet-stream")]
        private var EmbedShader:Class;

        private var shader:Shader = new Shader(new EmbedShader());
        private var sound:Vector.<Sound> = new Vector.<Sound>();
        private var bytes:Vector.<ByteArray> = new Vector.<ByteArray>();
        private var sliders:Vector.<Slider> = new Vector.<Slider>();
        private var graph:Vector.<Shape> = new Vector.<Shape>();
        private var recBA:ByteArray = new ByteArray();
        private var BUFFER_SIZE:int = 0x800;

        public var playback:Sound = new Sound();
        public var container:Sprite = new Sprite();
        public var isEvent:Boolean = false;

        public function AudioMixer():void {
            container.y = stage.stageHeight * .5;
            addChild(container);

            sound.push(new Track1(), new Track2(), new Track2(), new Track2(), new Track2(), new Track2(),
                       new Track2(), new Track2(), new Track2(), new Track2(), new Track2(), new Track2());

            // One volume slider, one outline and one green level bar per track.
            for (var i:int = 0; i < sound.length; i++) {
                var slider:Slider = new Slider();
                slider.maximum = 1;
                slider.minimum = 0;
                slider.snapInterval = 0.025;
                slider.value = 0.8;
                slider.rotation += -90;
                slider.x = i * 40 + 25;
                container.addChild(slider);
                sliders.push(slider);

                var line:Shape = new Shape();
                line.graphics.lineStyle(1, 0x888888);
                line.graphics.drawRect(i * 40 + 14, 0, 5, -80);
                line.graphics.endFill();
                container.addChild(line);

                var shape:Shape = new Shape();
                shape.graphics.beginFill(0x00cc00);
                shape.graphics.drawRect(i * 40 + 15, 0, 3, -80);
                shape.graphics.endFill();
                container.addChild(shape);
                graph.push(shape);
            }

            playback.addEventListener(SampleDataEvent.SAMPLE_DATA, onSoundData);
            playback.play();
        }

        private function onSoundData(event:SampleDataEvent):void {
            for (var i:int = 0; i < sound.length; i++) {
                // Extract the next buffer of 32-bit stereo samples from each track.
                bytes[i] = new ByteArray();
                bytes[i].length = BUFFER_SIZE * 4 * 2;
                sound[i].extract(bytes[i], BUFFER_SIZE);

                // Rough level metering for the green bar.
                var volume:Number = 0;
                bytes[i].position = 0;
                for (var j:int = 0; j < BUFFER_SIZE; j++) {
                    volume += Math.abs(bytes[i].readFloat());
                    volume += Math.abs(bytes[i].readFloat());
                }
                volume = (volume / (BUFFER_SIZE * .5)) * sliders[i].value;

                // Feed the buffer and its slider value to the Pixel Bender kernel.
                shader.data['track' + (i + 1)].width = BUFFER_SIZE / 1024;
                shader.data['track' + (i + 1)].height = 512;
                shader.data['track' + (i + 1)].input = bytes[i];
                shader.data['vol' + (i + 1)].value = [sliders[i].value];

                graph[i].scaleY = volume;
            }

            // Run the mix twice: once into the playback stream, once into recBA for saving.
            var shaderJob:ShaderJob = new ShaderJob(shader, event.data, BUFFER_SIZE / 1024, 512);
            shaderJob.start(true);
            var shaderJob2:ShaderJob = new ShaderJob(shader, recBA, BUFFER_SIZE / 1024, 512);
            shaderJob2.start(true);
        }
    }
}
Answer:
You can tell when a shader has completed its job by using the ShaderEvent.COMPLETE listener. Like so:
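Something along these lines (a minimal sketch reusing shader, recBA and BUFFER_SIZE from the question; the onMixComplete handler name is just an illustration):

var shaderJob:ShaderJob = new ShaderJob(shader, recBA, BUFFER_SIZE / 1024, 512);
shaderJob.addEventListener(ShaderEvent.COMPLETE, onMixComplete);
shaderJob.start(false);   // false = run asynchronously, so COMPLETE is dispatched

private function onMixComplete(event:ShaderEvent):void {
    // event.byteArray is the ByteArray the job rendered into (recBA here),
    // so at this point it is safe to encode it (e.g. with WaveEncoder) and save it.
}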
See this link for more details.
One thing about your code, though: you're running this shader job inside a SampleDataEvent handler, and I can see that being problematic, since your mixing may drift out of sync with your playback (that is, if you plan on mixing live and writing the mixed data back into the sound stream). Anyway, that's perhaps a matter for a new question; this should solve your problem of needing to know when the mixing is complete.
Note that you also need to pass false to shaderJob.start(false). From the documentation for ShaderEvent.COMPLETE:
"Dispatched when a ShaderJob that executes asynchronously finishes processing the data using the shader. A ShaderJob instance executes asynchronously when the start() method is called with a false value for the waitForCompletion parameter."
Update
In response to your question about how to only process inside the SampleDataEvent handler if the sound isn't already being processed:
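One way to do that, sketched under the assumption that the question's isEvent boolean is used as the guard and that an onMixComplete handler like the one above exists:

private function onSoundData(event:SampleDataEvent):void {
    if (isEvent) return;        // the previous mix is still running, skip this buffer
    isEvent = true;

    // ... extract the tracks and fill shader.data exactly as in the question ...

    var shaderJob:ShaderJob = new ShaderJob(shader, recBA, BUFFER_SIZE / 1024, 512);
    shaderJob.addEventListener(ShaderEvent.COMPLETE, onMixComplete);
    shaderJob.start(false);     // asynchronous, so COMPLETE will be dispatched
}

private function onMixComplete(event:ShaderEvent):void {
    isEvent = false;            // ready to process the next buffer
    // recBA now holds the mixed buffer and could be appended to the data you save to a file.
}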