
View Full Version : How to use CoreAudio (MacOS)



Key-Real
08-04-2022, 03:29 PM
macOS CoreAudio buffer playback produces annoying noise mixed in with the correct sound. I'm trying to play valid .wav data through the buffer. Why am I playing a .wav? Because it has known-good data. What I'm trying to achieve is to understand how to write to the sound buffer correctly; I'm porting a music engine to macOS ....

{$linkframework AudioToolbox}
{$MODE OBJFPC}
uses MacOSAll,CocoaAll,crt,baseunix;


type


TSoundState = packed record
done:boolean;
end;

TwavHeader = packed record

RIFF:array [0..3] of char; // RIFF Header Magic header
ChunkSize:dword; // RIFF Chunk Size
WAVE:array [0..3] of char; // WAVE Header

fmt:array [0..3] of char; // FMT header
Subchunk1Size:dword; // Size of the fmt chunk
AudioFormat:word; // Audio format 1=PCM,6=mulaw,7=alaw, 257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM
NumOfChan:word; // Number of channels 1=Mono 2=Stereo
SamplesPerSec:dword; // Sampling Frequency in Hz
bytesPerSec:dword; // bytes per second
blockAlign:word; // 2=16-bit mono, 4=16-bit stereo
bitsPerSample:word; // Number of bits per sample

Subchunk2ID:array [0..3] of char; // "data" string
Subchunk2Size:dword; // Sampled data length
end;

var
wavFile:file;
wavheader:TwavHeader;


bufferSize:uint32;


Var
Req,Rem : TimeSpec;

procedure auCallback(inUserData:pointer; queue:AudioQueueRef; buffer:AudioQueueBufferRef); MWPascal;
var
SoundState : TSoundState;
p:pointer;
numToRead:dword;
i:dword;
w:word;
f:single;
int:integer;
begin

SoundState:=TSoundState(inUserData^);


buffer^.mAudioDataByteSize := bufferSize div sizeof(single);


numToRead:=bufferSize div sizeof(single) * 2;


getmem(p,numToRead);

blockread(wavFile,p^,numToRead);

for i:=0 to numToRead div 2 do begin
w:=pword(p+i*2)^;
f:=(w / $8000) - 1;
psingle(buffer^.mAudioData + i*sizeof(single))^:=f;


end;

freemem(p);

AudioQueueEnqueueBuffer(queue, buffer, 0, nil);


end;
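
{ Aside, assuming the source file is canonical PCM: a .wav whose header
  reports bitsPerSample = 16 normally stores SIGNED 16-bit samples, so
  reading each one as an unsigned word and mapping it with (w / $8000) - 1
  flips the sign bit and plays back as harsh noise. A minimal sketch of the
  signed variant, with s declared as smallint instead of w: word:

    s := psmallint(p + i*2)^;                               // signed sample
    psingle(buffer^.mAudioData + i*sizeof(single))^ := s / 32768.0;

  Two smaller points in the same callback: "for i := 0 to numToRead div 2 do"
  runs one element too far (Pascal for-loops are inclusive at both ends), and
  the loop fills bufferSize bytes of floats, so mAudioDataByteSize would be
  bufferSize rather than bufferSize div sizeof(single). }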


procedure checkError(error:OSStatus);
begin
if (error <> noErr) then begin
writeln('Error: ', error);
halt;
end;
end;

var
auDesc:AudioStreamBasicDescription;
auQueue:AudioQueueRef;
auBuffers:array[0..1] of AudioQueueBufferRef;
soundState:TSoundState;
err:OSStatus;


begin

assign(wavFile,'unreal.wav');
reset(wavFile,1);


blockread(wavFile,wavHeader,sizeof(Twavheader));


with wavHeader do begin
writeln(RIFF[0],RIFF[1],RIFF[2],RIFF[3]); // RIFF Header Magic header
writeln('RIFF Chunk Size ',ChunkSize);
writeln(WAVE[0],WAVE[1],WAVE[2],WAVE[3]); // WAVE Header

writeln(fmt[0],fmt[1],fmt[2],fmt[3]); // FMT header


writeln('Size of the fmt chunk ',Subchunk1Size);
writeln('Audio format ',AudioFormat); // Audio format 1=PCM,6=mulaw,7=alaw, 257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM
writeln('Number of channels ', NumOfChan); // Number of channels 1=Mono 2=Stereo
writeln('Sampling Frequency in Hz ',SamplesPerSec); // Sampling Frequency in Hz
writeln('bytes per second ',bytesPerSec); // bytes per second
writeln('blockAlign ',blockAlign); // 2=16-bit mono, 4=16-bit stereo
writeln('Number of bits per sample ',bitsPerSample); // Number of bits per sample

writeln(Subchunk2ID[0],Subchunk2ID[1],Subchunk2ID[2],Subchunk2ID[3]); // "data" string
writeln('Sampled data length ',Subchunk2Size); // Sampled data length
end;

// the queue will play interleaved stereo 32-bit float linear PCM at the
// file's sample rate (the source .wav itself holds 16-bit integer samples)


auDesc.mSampleRate := wavHeader.SamplesPerSec;
auDesc.mFormatID := kAudioFormatLinearPCM;
auDesc.mFormatFlags := kLinearPCMFormatFlagIsFloat or kLinearPCMFormatFlagIsPacked;


auDesc.mBytesPerPacket := 8;
auDesc.mFramesPerPacket := 1;
auDesc.mBytesPerFrame := 8;
auDesc.mChannelsPerFrame := 2;
auDesc.mBitsPerChannel := 32;


{
auDesc.mSampleRate := 48000;
auDesc.mFormatID := kAudioFormatLinearPCM;
auDesc.mFormatFlags := kLinearPCMFormatFlagIsBigEndian or kLinearPCMFormatFlagIsSignedInteger or kLinearPCMFormatFlagIsPacked;
auDesc.mBytesPerPacket := 4;
auDesc.mFramesPerPacket := 1;
auDesc.mBytesPerFrame := 4;
auDesc.mChannelsPerFrame := 2;
auDesc.mBitsPerChannel := 16;
}
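
{ For reference, with interleaved linear PCM the fields are tied together:
  mBytesPerFrame = mChannelsPerFrame * (mBitsPerChannel div 8) = 2 * 4 = 8,
  and with mFramesPerPacket = 1, mBytesPerPacket = mBytesPerFrame = 8,
  which is what the float setup above uses. }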

// our persistent state for sound playback
soundState.done:=false;

// most of the 0 and nil params here are for compressed sound formats etc.
err := AudioQueueNewOutput(auDesc, @auCallback, @soundState, nil, nil, 0, auQueue);
checkError(err);

// generate buffers holding at most 1/16th of a second of data
bufferSize := round(auDesc.mBytesPerFrame * (auDesc.mSampleRate / 16));
writeln(bufferSize);
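// e.g. for a 44100 Hz file this gives round(8 * 44100/16) = 22050 bytes of
// float output per refill, which the callback feeds from roughly half as
// many bytes (numToRead) of 16-bit source data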




err := AudioQueueAllocateBuffer(auQueue, bufferSize, auBuffers[0]);
checkError(err);


//err := AudioQueueAllocateBuffer(auQueue, bufferSize, auBuffers[1]);
//checkError(err);


// prime the buffers
auCallback(@soundState, auQueue, auBuffers[0]);
//auCallback(@soundState, auQueue, auBuffers[1]);


// enqueue for playing
AudioQueueEnqueueBuffer(auQueue, auBuffers[0], 0, nil);
//AudioQueueEnqueueBuffer(auQueue, auBuffers[1], 0, nil);
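
{ Note: output queues are usually primed with two or three buffers so one can
  play while the callback refills another; with auBuffers[1] commented out
  there is nothing left enqueued while auBuffers[0] is being refilled. Also,
  auCallback already enqueues the buffer it fills, so the enqueue call above
  is for a buffer the queue has already been given. }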


// go!
AudioQueueStart(auQueue, nil);

// Our AudioQueue creation options put the CA handling on its own thread
// so this is a quick hack to allow us to hear some sound.
readln;

// be nice even though it doesn't really matter at this point
AudioQueueDispose(auQueue, true);
end.

What am I doing wrong?