The main steps for implementing audio playback with AudioQueue can be summarized concisely as follows.
1. Open the audio file to be played
2. Get the data format of the audio file
3. Prepare the queue used for playback
4. Move buffered data into the queue
5. Start playback
6. Process the queue in the callback function
1. Playing Music
1.playAudio.h
Declares an Objective-C class for playback.
//
// playAudio.h
// ffmpegPlayAudio
//
// Created by infomedia xuanyuanchen on 12-3-26.
// Copyright (c) 2012 xuanyuanchen. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import <AudioToolbox/AudioFile.h>
#define NUM_BUFFERS 3
@interface playAudio : NSObject{
//AudioFileID of the file being played
AudioFileID audioFile;
//Audio stream description (data format) object
AudioStreamBasicDescription dataFormat;
//The playback audio queue
AudioQueueRef queue;
SInt64 packetIndex;                        // next packet to read from the file
UInt32 numPacketsToRead;                   // packets read per buffer fill
UInt32 bufferByteSize;                     // buffer size in bytes (unused in this listing)
AudioStreamPacketDescription *packetDescs; // per-packet descriptions (VBR formats only)
AudioQueueBufferRef buffers[NUM_BUFFERS];  // the playback buffers
}
//Expose the queue as an instance property
@property AudioQueueRef queue;
//Initializer: opens the file at path and starts playback
-(id)initWithAudio:(NSString *)path;
//Buffer-refill methods
-(void)audioQueueOutputWithQueue:(AudioQueueRef)audioQueue
                     queueBuffer:(AudioQueueBufferRef)audioQueueBuffer;
-(UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer;
@end
//The callback is a plain C function, so its declaration sits outside the @interface block
static void BufferCallback(void *inUserData, AudioQueueRef inAQ,
                           AudioQueueBufferRef buffer);
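A minimal caller sketch, assuming ARC and a bundled file (the name sample.mp3 is mine; the self.audio property follows the usage comment in playAudio.m below):

// In a view controller that retains the player, e.g. in viewDidLoad:
NSString *path = [[NSBundle mainBundle] pathForResource:@"sample" ofType:@"mp3"];
self.audio = [[playAudio alloc] initWithAudio:path]; // playback starts immediately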
2.playAudio.m
The implementation of playAudio.
//
// playAudio.m
// ffmpegPlayAudio
//
// Created by infomedia infomedia on 12-3-26.
// Copyright (c) 2012 infomedia. All rights reserved.
//
#import "playAudio.h"
//Observed in testing: gBufferSizeBytes = 0x10000 works fine for compressed formats
//(mp3/aac, etc.), but with a wav input, playback stops after only a few seconds; and a
//phone cannot simply allocate much larger buffers for wav, so suggestions are welcome
static UInt32 gBufferSizeBytes = 0x10000; // it must be a power of 2
@implementation playAudio
@synthesize queue;
//Callback implementation
static void BufferCallback(void *inUserData,AudioQueueRef inAQ,
AudioQueueBufferRef buffer){
playAudio* player=(__bridge playAudio*)inUserData;
[player audioQueueOutputWithQueue:inAQ queueBuffer:buffer];
}
//Buffer-refill implementation
-(void) audioQueueOutputWithQueue:(AudioQueueRef)audioQueue queueBuffer:(AudioQueueBufferRef)audioQueueBuffer{
    OSStatus status;
    //Read packet data from the file
    UInt32 numBytes;
    UInt32 numPackets = numPacketsToRead;
    status = AudioFileReadPackets(audioFile, false, &numBytes, packetDescs, packetIndex, &numPackets, audioQueueBuffer->mAudioData);
    //On a successful read
    if (numPackets > 0) {
        //Set the buffer's byte size to match the audio data actually read
        audioQueueBuffer->mAudioDataByteSize = numBytes;
        //Hand the filled buffer back to the queue; pass a packet count only for VBR data
        status = AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffer, (packetDescs ? numPackets : 0), packetDescs);
        //Advance the packet position
        packetIndex += numPackets;
    }
}
//Initialization of audio playback
//Declare a playAudio object in your view controller and initialize it like this:
//self.audio = [[playAudio alloc] initWithAudio:@"/Users/xuanyuanchen/audio/daolang.mp3"];
-(id) initWithAudio:(NSString *)path{
    if (!(self = [super init])) return nil;
    UInt32 size, maxPacketSize;
    char *cookie;
    int i;
    OSStatus status;
    //Open the audio file
    status = AudioFileOpenURL((__bridge CFURLRef)[NSURL fileURLWithPath:path], kAudioFileReadPermission, 0, &audioFile);
    if (status != noErr) {
        //Error handling
        NSLog(@"*** Error *** PlayAudio - play:Path: could not open audio file. Path given was: %@", path);
        return nil;
    }
    //Get the audio data format
    size = sizeof(dataFormat);
    AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &size, &dataFormat);
    //Create the playback audio queue
    AudioQueueNewOutput(&dataFormat, BufferCallback, (__bridge void *)self,
                        NULL, NULL, 0, &queue);
    //Work out how many packets one buffer holds
    if (dataFormat.mBytesPerPacket == 0 || dataFormat.mFramesPerPacket == 0) {
        //VBR: bound the packet size, then size the packet-description array to match
        size = sizeof(maxPacketSize);
        AudioFileGetProperty(audioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize);
        if (maxPacketSize > gBufferSizeBytes) {
            maxPacketSize = gBufferSizeBytes;
        }
        //Number of packets one buffer can hold
        numPacketsToRead = gBufferSizeBytes / maxPacketSize;
        packetDescs = malloc(sizeof(AudioStreamPacketDescription) * numPacketsToRead);
    } else {
        //CBR: packet size is fixed and no packet descriptions are needed
        numPacketsToRead = gBufferSizeBytes / dataFormat.mBytesPerPacket;
        packetDescs = NULL;
    }
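    //Worked example with illustrative numbers: gBufferSizeBytes = 0x10000 = 65536 bytes.
    //VBR with an upper-bound packet size of, say, 1024 bytes gives
    //numPacketsToRead = 65536 / 1024 = 64 packets per buffer fill; CBR 16-bit stereo
    //PCM (mBytesPerPacket = 4) would give 65536 / 4 = 16384 packets.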
    //Set the magic cookie (see the discussion in Chapter 27); query its size first
    status = AudioFileGetPropertyInfo(audioFile, kAudioFilePropertyMagicCookieData, &size, NULL);
    if (status == noErr && size > 0) {
        cookie = malloc(sizeof(char) * size);
        AudioFileGetProperty(audioFile, kAudioFilePropertyMagicCookieData, &size, cookie);
        AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, cookie, size);
        free(cookie);
    }
    //Allocate the buffers and prime them with data
    packetIndex = 0;
    for (i = 0; i < NUM_BUFFERS; i++) {
        AudioQueueAllocateBuffer(queue, gBufferSizeBytes, &buffers[i]);
        //Fill the buffer; stop early if the file has already run out of packets
        if ([self readPacketsIntoBuffer:buffers[i]] == 1) {
            break;
        }
    }
    Float32 gain = 1.0;
    //Set the volume
    AudioQueueSetParameter(queue, kAudioQueueParam_Volume, gain);
    //Start the queue; from here on the system invokes the callback automatically
    AudioQueueStart(queue, NULL);
    return self;
}
-(UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer {
    UInt32 numBytes, numPackets;
    //Read data from the file into the buffer
    numPackets = numPacketsToRead;
    AudioFileReadPackets(audioFile, false, &numBytes, packetDescs, packetIndex, &numPackets, buffer->mAudioData);
    if (numPackets > 0) {
        buffer->mAudioDataByteSize = numBytes;
        AudioQueueEnqueueBuffer(queue, buffer, (packetDescs ? numPackets : 0), packetDescs);
        packetIndex += numPackets;
    } else {
        return 1; //no packets were read: we have reached the end of the file
    }
    return 0; //0 means a normal return
}
@end
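Note that the listing never tears anything down: once readPacketsIntoBuffer stops enqueueing, the queue simply goes silent, and the queue, the file handle, and packetDescs all leak. A minimal cleanup sketch, assuming a method like this is added to playAudio.m (the name stop is my own, not from the original source):

-(void)stop {
    AudioQueueStop(queue, true);    // stop immediately
    AudioQueueDispose(queue, true); // releases the queue and its buffers
    AudioFileClose(audioFile);
    if (packetDescs) {
        free(packetDescs);
        packetDescs = NULL;
    }
}

As for the wav stall flagged at the top of the file, sizing buffers by duration rather than by a fixed 0x10000 bytes (as the DeriveBufferSize helper in the recording section below does) is one plausible direction.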
2. Recording
(1.)Record.h
#import <AudioToolbox/AudioToolbox.h>
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
static const int kNumberBuffers = 3;
@interface Record : NSObject<AVAudioPlayerDelegate>
{
    AudioStreamBasicDescription mDataFormat;      // recording format (16 kHz 16-bit mono PCM)
    AudioQueueRef mQueue;                         // the input (recording) queue
    AudioQueueBufferRef mBuffers[kNumberBuffers]; // capture buffers
    UInt32 bufferByteSize;                        // size of each capture buffer
    AVAudioPlayer *player;                        // plays the finished recording back
}
@property(nonatomic,assign) bool mIsRunning;
@property(nonatomic,retain) NSMutableData* recordData;
-(void)record;
-(void)stop;
-(void)play;
-(void)pause;
-(void)dealWithData:(NSData*)data;
@end
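A minimal caller sketch (assuming ARC; the local variable name recorder is mine):

Record *recorder = [[Record alloc] init];
[recorder record]; // start capturing from the microphone
// ... some time later ...
[recorder stop];   // finalizes recorder.recordData as wav data
[recorder play];   // plays it back through AVAudioPlayer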
(2.)Record.m (it calls SoundTouch's C++ API, so the file must be compiled as Objective-C++, e.g. renamed Record.mm)
#import "Record.h"
#include "SoundTouch.h" // SoundTouch C++ headers; the library must be added to the project
@implementation Record
@synthesize mIsRunning = _mIsRunning;
@synthesize recordData =_recordData;
-(id)init
{
self = [super init];
if (self) {
AudioSessionInitialize(NULL, NULL, NULL, (__bridge void*)self);
// self.recordData =[NSMutableData data];
self.mIsRunning=false;
}
return self;
}
static void HandleInputBuffer (
    void *inUserData,
    AudioQueueRef inAQ,
    AudioQueueBufferRef inBuffer,
    const AudioTimeStamp *inStartTime,
    UInt32 inNumPackets,
    const AudioStreamPacketDescription *inPacketDesc
) {
    Record *recorderPro = (__bridge Record *)inUserData;
    if (inNumPackets > 0 && recorderPro.mIsRunning) {
        // Copy the captured PCM out of the queue's buffer
        int pcmSize = inBuffer->mAudioDataByteSize;
        char *pcmData = (char *)inBuffer->mAudioData;
        NSData *data = [[NSData alloc] initWithBytes:pcmData length:pcmSize];
        [recorderPro dealWithData:data];
        // [recorderPro.recordData appendData:data];
    }
    // Re-enqueue the buffer whenever recording is still active, even after an
    // empty callback, so the queue never runs out of buffers
    if (recorderPro.mIsRunning) {
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
    }
}
// Derives a buffer size large enough to hold the given duration of audio
// (the C++ reference parameter below is another reason this file needs Objective-C++)
void DeriveBufferSize (
AudioQueueRef audioQueue,
AudioStreamBasicDescription &ASBDescription,
Float64 seconds,
UInt32 *outBufferSize
) {
static const int maxBufferSize = 0x50000;
int maxPacketSize = ASBDescription.mBytesPerPacket;
if (maxPacketSize == 0) {
UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
AudioQueueGetProperty (
audioQueue,
kAudioQueueProperty_MaximumOutputPacketSize,
&maxPacketSize,
&maxVBRPacketSize
);
}
Float64 numBytesForTime =
ASBDescription.mSampleRate * maxPacketSize * seconds;
*outBufferSize =
UInt32 (numBytesForTime < maxBufferSize ?
numBytesForTime : maxBufferSize);
}
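// The -stop method below calls createWaveHeader, which the original article never
// defines. A minimal sketch of what it could look like, assuming the canonical
// 44-byte little-endian RIFF/WAVE PCM header (the helper name and layout are
// assumptions, not the original author's code; the caller must free() the result):
static void *createWaveHeader(int dataLength, short channels, int sampleRate, short bitsPerSample) {
    short blockAlign = channels * (bitsPerSample / 8);
    int byteRate = sampleRate * blockAlign;
    char *header = (char *)malloc(44);
    memcpy(header, "RIFF", 4);
    int chunkSize = 36 + dataLength;      // total file size minus the first 8 bytes
    memcpy(header + 4, &chunkSize, 4);
    memcpy(header + 8, "WAVEfmt ", 8);    // "WAVE" tag plus "fmt " subchunk id
    int subchunk1Size = 16;               // PCM fmt subchunk is always 16 bytes
    memcpy(header + 16, &subchunk1Size, 4);
    short audioFormat = 1;                // 1 = linear PCM
    memcpy(header + 20, &audioFormat, 2);
    memcpy(header + 22, &channels, 2);
    memcpy(header + 24, &sampleRate, 4);
    memcpy(header + 28, &byteRate, 4);
    memcpy(header + 32, &blockAlign, 2);
    memcpy(header + 34, &bitsPerSample, 2);
    memcpy(header + 36, "data", 4);
    memcpy(header + 40, &dataLength, 4);  // size of the raw PCM that follows
    return header;
}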
-(void)dealWithData:(NSData*)data
{
[self changeVoice:data];
}
-(void)changeVoice:(NSData*)audioData
{
    // Note: constructing SoundTouch afresh for every buffer resets its internal state
    // between callbacks; a single long-lived instance would avoid discontinuities
    soundtouch::SoundTouch mSoundTouch;
    mSoundTouch.setSampleRate(16000);
    mSoundTouch.setChannels(1);
    mSoundTouch.setTempoChange(0.05);
    mSoundTouch.setPitchSemiTones(12);
    mSoundTouch.setRateChange(-0.7);
    mSoundTouch.setSetting(SETTING_SEQUENCE_MS, 40);
    mSoundTouch.setSetting(SETTING_SEEKWINDOW_MS, 16);
    mSoundTouch.setSetting(SETTING_OVERLAP_MS, 8);
    if (audioData != nil) {
        char *pcmData = (char *)audioData.bytes;
        int pcmSize = (int)audioData.length;
        int nSamples = pcmSize / 2; // 16-bit mono: two bytes per sample
        mSoundTouch.putSamples((short *)pcmData, nSamples);
        short *samples = new short[pcmSize];
        int numSamples = 0;
        do {
            memset(samples, 0, pcmSize * sizeof(short));
            numSamples = mSoundTouch.receiveSamples(samples, pcmSize);
            [self.recordData appendBytes:samples length:numSamples * 2];
        } while (numSamples > 0);
        delete [] samples;
    }
    NSLog(@"-------recording %lu", (unsigned long)self.recordData.length);
}
- (void) setupAudioFormat:(UInt32) inFormatID SampleRate:(int) sampleRate
{
    memset(&mDataFormat, 0, sizeof(mDataFormat));
    mDataFormat.mSampleRate = sampleRate;
    //UInt32 size = sizeof(mDataFormat.mChannelsPerFrame);
    //AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels, &size, &mDataFormat.mChannelsPerFrame);
    mDataFormat.mChannelsPerFrame = 1;
    mDataFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM){
        // if we want pcm, default to signed 16-bit little-endian
        mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mDataFormat.mBitsPerChannel = 16;
        // 16-bit mono: (16 / 8) * 1 = 2 bytes per frame and per packet
        mDataFormat.mBytesPerPacket = mDataFormat.mBytesPerFrame = (mDataFormat.mBitsPerChannel / 8) * mDataFormat.mChannelsPerFrame;
        mDataFormat.mFramesPerPacket = 1;
    }
}
-(void)record
{
    self.recordData = [NSMutableData data];
    AudioSessionSetActive(true);
    // category
    UInt32 category = kAudioSessionCategory_PlayAndRecord;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
    // format
    [self setupAudioFormat:kAudioFormatLinearPCM SampleRate:16000];
    // Create the input queue and install the callback, then size its buffers for half
    // a second of audio (the queue must exist before DeriveBufferSize can query it)
    AudioQueueNewInput(&mDataFormat, HandleInputBuffer, (__bridge void*)self, NULL, NULL, 0, &mQueue);
    DeriveBufferSize(mQueue, mDataFormat, 0.5, &bufferByteSize);
    for (int i = 0; i < kNumberBuffers; ++i) {
        AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]);
        AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL);
    }
    // Start recording
    AudioQueueStart(mQueue, NULL);
    self.mIsRunning = YES;
}
-(void)stop
{
    self.mIsRunning = false;
    AudioQueueFlush(mQueue);
    AudioQueueStop(mQueue, true);
    AudioQueueDispose(mQueue, true);
    // Prepend a 44-byte wav header so AVAudioPlayer can play the raw PCM
    NSMutableData *wavDatas = [[NSMutableData alloc] init];
    int fileLength = (int)self.recordData.length;
    void *header = createWaveHeader(fileLength, 1, 16000, 16);
    [wavDatas appendBytes:header length:44];
    free(header);
    [wavDatas appendData:self.recordData];
    self.recordData = wavDatas;
    NSLog(@"-------stop %lu", (unsigned long)self.recordData.length);
    // NSString *path = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) objectAtIndex:0];
    // NSString *filePath = [path stringByAppendingPathComponent:@"soundtouch.wav"];
    // [wavDatas writeToFile:filePath atomically:YES];
}
-(void)play
{
NSError *playerError;
player = [[AVAudioPlayer alloc] initWithData:self.recordData error:&playerError];
// NSString *path = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) objectAtIndex:0];
// NSString *filePath = [path stringByAppendingPathComponent:@"soundtouch.wav"];
// player = [[AVAudioPlayer alloc] initWithContentsOfURL:recordedFile error:&playerError];
// player = [[AVAudioPlayer alloc] initWithContentsOfURL:[NSURL fileURLWithPath:filePath] error:&playerError];
if (player == nil)
{
NSLog(@"ERror creating player: %@", [playerError description]);
}
player.delegate = self;
if(![player isPlaying]) {
[player play];
}
}
-(void)pause
{
if ([player isPlaying]) {
[player pause];
}
}
- (void)audioPlayerDidFinishPlaying:(AVAudioPlayer *)player successfully:(BOOL)flag
{
NSLog(@"%@",@"audioPlayerDidFinishPlaying");
}
@end
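One caveat: the C Audio Session API used above (AudioSessionInitialize, AudioSessionSetActive, AudioSessionSetProperty) has been deprecated since iOS 7. A sketch of the AVAudioSession equivalent of the session setup in -init and -record, assuming nothing beyond the AVFoundation import the header already has:

NSError *sessionError = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
// Equivalent of kAudioSessionCategory_PlayAndRecord plus AudioSessionSetActive(true)
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError];
[session setActive:YES error:&sessionError];
if (sessionError) {
    NSLog(@"AVAudioSession setup failed: %@", sessionError);
}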