音频单元,设置格式失败,返回 -10581
我不明白为什么以下返回 OSStatus -10851:
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&outFormat,
sizeof(outFormat));
Works on the Simulator but not on the device。
这是其余的代码:
#import "VoipRecorder.h"
#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>
#define kOutputBus 0
#define kInputBus 1
void SetAUCanonical(AudioStreamBasicDescription *format, UInt32 nChannels, bool interleaved)
// note: leaves sample rate untouched
{
format->mFormatID = kAudioFormatLinearPCM;
#if TARGET_IPHONE_SIMULATOR
int sampleSize = sizeof(Float32);
format->mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
#else
int sampleSize = sizeof(AudioSampleType);
format->mFormatFlags = kAudioFormatFlagsCanonical;
#endif
format->mBitsPerChannel = 8 * sampleSize;
format->mChannelsPerFrame = nChannels;
format->mFramesPerPacket = 1;
if (interleaved)
format->mBytesPerPacket = format->mBytesPerFrame = nChannels * sampleSize;
else {
format->mBytesPerPacket = format->mBytesPerFrame = sampleSize;
format->mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
}
}
int SetupRemoteIO (AudioUnit *audioUnit, AURenderCallbackStruct inRenderProc, AURenderCallbackStruct inOutputProc, AudioStreamBasicDescription * outFormat)
{
OSStatus status;
// Open the output unit
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, audioUnit);
UInt32 flag = 1;
// Enable IO for recording
status = AudioUnitSetProperty(*audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
assert(status == 0);
// Enable IO for playback
status = AudioUnitSetProperty(*audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
assert(status == 0);
// set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point
SetAUCanonical(outFormat, 1, NO);
outFormat->mSampleRate = 44100.00; //8000;
//Apply format
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&outFormat,
sizeof(outFormat));
assert(status == 0);
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&outFormat,
sizeof(outFormat));
// Setup callbacks
// Recording
status = AudioUnitSetProperty(*audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Input,
kInputBus,
&inRenderProc,
sizeof(inRenderProc));
assert(status == 0);
// Playback
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Output,
kOutputBus,
&inOutputProc,
sizeof(inOutputProc));
assert(status == 0);
status = AudioUnitInitialize(*audioUnit);
assert(status == 0);
return 0;
}
@implementation VoipRecorder
@synthesize audioUnit;
- (id)init
{
self = [super init];
if (self) {
}
return self;
}
void rioInterruptionListener(void *inClientData, UInt32 inInterruption)
{
printf("Session interrupted! --- %s ---", inInterruption == kAudioSessionBeginInterruption ? "Begin Interruption" : "End Interruption");
VoipRecorder *THIS = (VoipRecorder*)inClientData;
if (inInterruption == kAudioSessionEndInterruption) {
// make sure we are again the active session
AudioSessionSetActive(true);
AudioOutputUnitStart(THIS.audioUnit);
}
if (inInterruption == kAudioSessionBeginInterruption) {
AudioOutputUnitStop(THIS.audioUnit);
}
}
int buffer[1000000];
int bufferSize = 2;
static OSStatus PerformSpeaker(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
NSLog(@"Speaker");
if (bufferSize == 0) {
return 0;
}
if (ioData == NULL) {
NSLog(@"err");
return 0;
}
return 0;
}
AudioBufferList *AllocateBuffers(UInt32 nBytes)
{
int channelCount = 2;
AudioBufferList *audioBufferList;
audioBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
audioBufferList->mNumberBuffers = 1;
audioBufferList->mBuffers[0].mNumberChannels = channelCount;
audioBufferList->mBuffers[0].mDataByteSize = nBytes;
audioBufferList->mBuffers[0].mData = (AudioUnitSampleType *)malloc(nBytes);
return audioBufferList;
}
static OSStatus PerformThru(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
VoipRecorder *THIS = (VoipRecorder *)inRefCon;
AudioBufferList *bufferList = AllocateBuffers(inNumberFrames*2);
OSStatus err = AudioUnitRender(THIS.audioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, bufferList);
if (err) {
printf("PerformThru: error %d\n", (int)err);
free(bufferList);
return err;
}
free(bufferList);
return 0;
}
- (void)setupAudio {
OSStatus status;
inputProc.inputProc = PerformThru;
inputProc.inputProcRefCon = self;
outputProc.inputProc = PerformSpeaker;
outputProc.inputProcRefCon = self;
buffer[0] = 0x4444;
buffer[1] = 0xffff;
status = AudioSessionInitialize(NULL, NULL, rioInterruptionListener, self);
assert(status == 0);
UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
assert(status == 0);
Float32 preferredBufferSize = .005;
status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
assert(status == 0);
UInt32 size = sizeof(hwSampleRate);
status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate);
assert(status == 0);
status = AudioSessionSetActive(true);
assert(status == 0);
status = SetupRemoteIO(&audioUnit, inputProc, outputProc, &thruFormat);
assert(status == 0);
status = AudioOutputUnitStart(audioUnit);
assert(status == 0);
size = sizeof(thruFormat);
status = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &thruFormat, &size);
assert(status == 0);
//NSLog(@"0x%X", status);
}
I can't figure out why the following returns OSStatus -10851:
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&outFormat,
sizeof(outFormat));
Works on the simulator but not on the device.
Here is the rest of the code:
#import "VoipRecorder.h"
#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>
#define kOutputBus 0
#define kInputBus 1
void SetAUCanonical(AudioStreamBasicDescription *format, UInt32 nChannels, bool interleaved)
// note: leaves sample rate untouched
{
format->mFormatID = kAudioFormatLinearPCM;
#if TARGET_IPHONE_SIMULATOR
int sampleSize = sizeof(Float32);
format->mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
#else
int sampleSize = sizeof(AudioSampleType);
format->mFormatFlags = kAudioFormatFlagsCanonical;
#endif
format->mBitsPerChannel = 8 * sampleSize;
format->mChannelsPerFrame = nChannels;
format->mFramesPerPacket = 1;
if (interleaved)
format->mBytesPerPacket = format->mBytesPerFrame = nChannels * sampleSize;
else {
format->mBytesPerPacket = format->mBytesPerFrame = sampleSize;
format->mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
}
}
int SetupRemoteIO (AudioUnit *audioUnit, AURenderCallbackStruct inRenderProc, AURenderCallbackStruct inOutputProc, AudioStreamBasicDescription * outFormat)
{
OSStatus status;
// Open the output unit
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, audioUnit);
UInt32 flag = 1;
// Enable IO for recording
status = AudioUnitSetProperty(*audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
assert(status == 0);
// Enable IO for playback
status = AudioUnitSetProperty(*audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
assert(status == 0);
// set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point
SetAUCanonical(outFormat, 1, NO);
outFormat->mSampleRate = 44100.00; //8000;
//Apply format
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&outFormat,
sizeof(outFormat));
assert(status == 0);
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&outFormat,
sizeof(outFormat));
// Setup callbacks
// Recording
status = AudioUnitSetProperty(*audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Input,
kInputBus,
&inRenderProc,
sizeof(inRenderProc));
assert(status == 0);
// Playback
status = AudioUnitSetProperty(*audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Output,
kOutputBus,
&inOutputProc,
sizeof(inOutputProc));
assert(status == 0);
status = AudioUnitInitialize(*audioUnit);
assert(status == 0);
return 0;
}
@implementation VoipRecorder
@synthesize audioUnit;
- (id)init
{
self = [super init];
if (self) {
}
return self;
}
void rioInterruptionListener(void *inClientData, UInt32 inInterruption)
{
printf("Session interrupted! --- %s ---", inInterruption == kAudioSessionBeginInterruption ? "Begin Interruption" : "End Interruption");
VoipRecorder *THIS = (VoipRecorder*)inClientData;
if (inInterruption == kAudioSessionEndInterruption) {
// make sure we are again the active session
AudioSessionSetActive(true);
AudioOutputUnitStart(THIS.audioUnit);
}
if (inInterruption == kAudioSessionBeginInterruption) {
AudioOutputUnitStop(THIS.audioUnit);
}
}
int buffer[1000000];
int bufferSize = 2;
static OSStatus PerformSpeaker(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
NSLog(@"Speaker");
if (bufferSize == 0) {
return 0;
}
if (ioData == NULL) {
NSLog(@"err");
return 0;
}
return 0;
}
AudioBufferList *AllocateBuffers(UInt32 nBytes)
{
int channelCount = 2;
AudioBufferList *audioBufferList;
audioBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
audioBufferList->mNumberBuffers = 1;
audioBufferList->mBuffers[0].mNumberChannels = channelCount;
audioBufferList->mBuffers[0].mDataByteSize = nBytes;
audioBufferList->mBuffers[0].mData = (AudioUnitSampleType *)malloc(nBytes);
return audioBufferList;
}
static OSStatus PerformThru(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
VoipRecorder *THIS = (VoipRecorder *)inRefCon;
AudioBufferList *bufferList = AllocateBuffers(inNumberFrames*2);
OSStatus err = AudioUnitRender(THIS.audioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, bufferList);
if (err) {
printf("PerformThru: error %d\n", (int)err);
free(bufferList);
return err;
}
free(bufferList);
return 0;
}
- (void)setupAudio {
OSStatus status;
inputProc.inputProc = PerformThru;
inputProc.inputProcRefCon = self;
outputProc.inputProc = PerformSpeaker;
outputProc.inputProcRefCon = self;
buffer[0] = 0x4444;
buffer[1] = 0xffff;
status = AudioSessionInitialize(NULL, NULL, rioInterruptionListener, self);
assert(status == 0);
UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
assert(status == 0);
Float32 preferredBufferSize = .005;
status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
assert(status == 0);
UInt32 size = sizeof(hwSampleRate);
status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate);
assert(status == 0);
status = AudioSessionSetActive(true);
assert(status == 0);
status = SetupRemoteIO(&audioUnit, inputProc, outputProc, &thruFormat);
assert(status == 0);
status = AudioOutputUnitStart(audioUnit);
assert(status == 0);
size = sizeof(thruFormat);
status = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &thruFormat, &size);
assert(status == 0);
//NSLog(@"0x%X", status);
}
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。
绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论
评论(1)
有两个可能需要检查的项目:您发布的代码混合使用了 AudioSampleType 和 AudioUnitSampleType,这是两种不同大小的数据类型。您还只在 1 个数据通道上指定 kAudioFormatFlagIsNonInterleaved 标志,这可能没有必要。
Two possible items to check: Your posted code is mixing the use of AudioSampleType and AudioUnitSampleType, which are two different sized data types. You are also specifying the kAudioFormatFlagIsNonInterleaved flag on only 1 channel of data, which probably isn't necessary.