开发者

Audio unit, set format fails with -10581

I can't figure out why the following returns OSStatus -10851:

status = AudioUnitSetProperty(*audioUnit, 
                kAudioUnitProperty_StreamFormat, 
                kAudioUnitScope_Output, 
                kInputBus, 
                &outFormat, 
                sizeof(outFormat));

Works on the simulator but not on the device.

Here is the rest of the code:

    #import "VoipRecorder.h"
#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>

#define kOutputBus 0
#define kInputBus 1

void SetAUCanonical(AudioStreamBasicDescription *format, UInt32 nChannels, bool interleaved)
// note: leaves sample rate untouched
{
    format->mFormatID = kAudioFormatLinearPCM;
#if TARGET_IPHONE_SIMULATOR
    int sampleSize = sizeof(Float32);
    format->mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
#else
    int sampleSize = sizeof(AudioSampleType);
    format->mFormatFlags = kAudioFormatFlagsCanonical;
#endif
    format->mBitsPerChannel = 8 * sampleSize;
    format->mChannelsPerFrame = nChannels;
    format->mFramesPerPacket = 1;
    if (interleaved)
        format->mBytesPerPacket = format->mBytesPerFrame = nChannels * sampleSize;
    else {
        format->mBytesPerPacket = format->mBytesPerFrame = sampleSize;
        format->mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
    }
}

int SetupRemoteIO (AudioUnit *audioUnit, AURenderCallbackStruct inRenderProc, AURenderCallbackStruct inOutputProc, AudioStreamBasicDescription * outFormat)
{
    OSStatus status;

    // Open the output unit
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);

    AudioComponentInstanceNew(comp, audioUnit);

    UInt32 flag = 1;
    // Enable IO for recording
    status = AudioUnitSetProperty(*audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Input, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));

    assert(status == 0);

    // Enable IO for playback
    status = AudioUnitSetProperty(*audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Output, 
                                  kOutputBus,
                                  &flag, 
                                  sizeof(flag));    

    assert(status == 0);

    // set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point
    SetAUCanonical(outFormat, 1, NO);

    outFormat->mSampleRate = 44100.00; //8000;

    //Apply format
    status = AudioUnitSetProperty(*audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Output, 
                                  kInputBus, 
                                  &outFormat, 
                                  sizeof(outFormat));

    assert(status == 0);开发者_JAVA技巧

    status = AudioUnitSetProperty(*audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Input, 
                                  kOutputBus, 
                                  &outFormat, 
                                  sizeof(outFormat));

    // Setup callbacks
    // Recording
    status = AudioUnitSetProperty(*audioUnit, 
                                  kAudioOutputUnitProperty_SetInputCallback, 
                                  kAudioUnitScope_Input, 
                                  kInputBus, 
                                  &inRenderProc, 
                                  sizeof(inRenderProc));
    assert(status == 0);

    // Playback
    status = AudioUnitSetProperty(*audioUnit, 
                                  kAudioUnitProperty_SetRenderCallback, 
                                  kAudioUnitScope_Output, 
                                  kOutputBus,
                                  &inOutputProc, 
                                  sizeof(inOutputProc));
    assert(status == 0);

    status = AudioUnitInitialize(*audioUnit);
    assert(status == 0);

    return 0;
}

@implementation VoipRecorder

@synthesize audioUnit;

- (id)init
{
    self = [super init];
    if (self) {

    }

    return self;
}

void rioInterruptionListener(void *inClientData, UInt32 inInterruption)
{
    printf("Session interrupted! --- %s ---", inInterruption == kAudioSessionBeginInterruption ? "Begin Interruption" : "End Interruption");

    VoipRecorder *THIS = (VoipRecorder*)inClientData;

    if (inInterruption == kAudioSessionEndInterruption) {
        // make sure we are again the active session
        AudioSessionSetActive(true);
        AudioOutputUnitStart(THIS.audioUnit);
    }

    if (inInterruption == kAudioSessionBeginInterruption) {
        AudioOutputUnitStop(THIS.audioUnit);
    }
}

int buffer[1000000];
int bufferSize = 2;

static OSStatus PerformSpeaker(
                            void                        *inRefCon, 
                            AudioUnitRenderActionFlags  *ioActionFlags, 
                            const AudioTimeStamp        *inTimeStamp, 
                            UInt32                      inBusNumber, 
                            UInt32                      inNumberFrames, 
                            AudioBufferList             *ioData)
{
    NSLog(@"Speaker");

    if (bufferSize == 0) {
        return 0;
    }

    if (ioData == NULL) {
        NSLog(@"err");
        return 0;
    }

    return 0;
}

AudioBufferList *AllocateBuffers(UInt32 nBytes)
{
    int channelCount = 2;

    AudioBufferList *audioBufferList;
    audioBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
    audioBufferList->mNumberBuffers = 1;
    audioBufferList->mBuffers[0].mNumberChannels = channelCount;
    audioBufferList->mBuffers[0].mDataByteSize = nBytes;
    audioBufferList->mBuffers[0].mData = (AudioUnitSampleType *)malloc(nBytes);

    return audioBufferList;
}

static OSStatus PerformThru(
                            void                        *inRefCon, 
                            AudioUnitRenderActionFlags  *ioActionFlags, 
                            const AudioTimeStamp        *inTimeStamp, 
                            UInt32                      inBusNumber, 
                            UInt32                      inNumberFrames, 
                            AudioBufferList             *ioData)
{
    VoipRecorder *THIS = (VoipRecorder *)inRefCon;

    AudioBufferList *bufferList = AllocateBuffers(inNumberFrames*2);

    OSStatus err = AudioUnitRender(THIS.audioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, bufferList);
    if (err) {
        printf("PerformThru: error %d\n", (int)err); 
        free(bufferList); 
        return err; 
    }


    free(bufferList);

    return 0;
}


- (void)setupAudio {

    OSStatus status;

    inputProc.inputProc = PerformThru;
    inputProc.inputProcRefCon = self;

    outputProc.inputProc = PerformSpeaker;
    outputProc.inputProcRefCon = self;

    buffer[0] = 0x4444;
    buffer[1] = 0xffff;
    status = AudioSessionInitialize(NULL, NULL, rioInterruptionListener, self);
    assert(status == 0);

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
    assert(status == 0);

    Float32 preferredBufferSize = .005;
    status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
    assert(status == 0);

    UInt32 size = sizeof(hwSampleRate);
    status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate);
    assert(status == 0);

    status = AudioSessionSetActive(true);
    assert(status == 0);

    status = SetupRemoteIO(&audioUnit, inputProc, outputProc, &thruFormat);
    assert(status == 0);

    status = AudioOutputUnitStart(audioUnit);
    assert(status == 0);

    size = sizeof(thruFormat);
    status = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &thruFormat, &size);
    assert(status == 0);

    //NSLog(@"0x%X", status);
}


Two possible items to check: Your posted code is mixing the use of AudioSampleType and AudioUnitSampleType, which are two different sized data types. You are also specifying the kAudioFormatFlagIsNonInterleaved flag on only 1 channel of data, which probably isn't necessary.

0

上一篇:

下一篇:

精彩评论

暂无评论...
验证码 换一张
取 消

最新问答

问答排行榜