/// <summary>
/// Accumulates one complete AAC frame into the pending MPEG4-generic RTP
/// aggregate packet, flushing the pending packet first when it would grow
/// past <c>_maxRTPPacketSize</c> or already holds 8 access units.
/// Buffer layout used here (as evidenced by the offsets below):
/// Buffers[0] = packet header area (sequence counter at offset 2, RTP
/// timestamp at offset 4, AU-headers length in bits at offset 12; its 14
/// bytes are counted in the size check), Buffers[1] = accumulated 2-byte
/// AU headers, Buffers[2] = payload staging for the send call,
/// _audioBuffer = accumulated raw AU payload bytes.
/// </summary>
/// <param name="pData">Stream positioned at the start of the frame; its position is restored before returning.</param>
/// <param name="dataLength">Byte length of this chunk of the frame.</param>
/// <param name="processedLength">Bytes of the frame already delivered (unused; chunked mode is unsupported).</param>
/// <param name="totalLength">Total byte length of the frame; must equal <paramref name="dataLength"/>.</param>
/// <param name="absoluteTimestamp">Timestamp of this frame; converted to RTP units when a packet is flushed.</param>
/// <returns>Always true — chunked input is skipped with a warning rather than treated as a failure.</returns>
private bool FeedDataAudioMPEG4Generic_aggregate(Stream pData, uint dataLength, uint processedLength, uint totalLength, uint absoluteTimestamp)
{
//1. Only whole frames are supported; chunked delivery is skipped, not failed
if (dataLength != totalLength)
{
WARN("Chunked mode not yet supported");
return true;
}
//2. Flush the pending packet if appending this AU would exceed the packet
//   budget (14 header bytes + queued AU headers + queued payload + 2-byte
//   new AU header + new payload-minus-7) or if 8 AU headers (16 bytes,
//   2 per AU) are already queued — the 3-bit AU-index written below can
//   only address 8 entries.
if (((14 + _audioData.Buffers[1].Length + _audioBuffer.GetAvaliableByteCounts() + 2 + dataLength - 7) > _maxRTPPacketSize)
|| (_audioData.Buffers[1].Length == 16))
{
//3. 16-bit sequence counter at header offset 2
_audioData.Buffers[0].Write(2, AudioCounter);
AudioCounter++;
//4. RTP timestamp at header offset 4.
//   NOTE(review): the packet is stamped with the CURRENT frame's
//   timestamp, not that of the first buffered AU — confirm intended.
_audioData.Buffers[0].Write(4,
BaseConnectivity.ToRTPTS(absoluteTimestamp,
Capabilities.Aac._sampleRate));
//6. Drain the staged payload into Buffers[2] and hand the packet off.
//   Offset 12 carries the AU-header length in BITS: each 2-byte header
//   contributes 16 bits, hence Length * 8.
Array.Resize(ref _audioData.Buffers[2], (int) _audioBuffer.GetAvaliableByteCounts());
_audioBuffer.Read(_audioData.Buffers[2], 0, _audioData.Buffers[2].Length);
_audioData.Buffers[0].Write(12, (ushort)(_audioData.Buffers[1].Length * 8));
Connectivity.FeedAudioData(ref _audioData, absoluteTimestamp);
// Reset the AU-header accumulator for the next packet
Array.Resize(ref _audioData.Buffers[1],0);
}
//3. Build this AU's 16-bit header: 13-bit AU-size (payload length minus the
//   7 leading bytes — presumably the ADTS header being stripped; confirm)
//   in the high bits, 3-bit AU-index equal to the count of AUs already
//   queued (2 bytes per queued header) in the low bits. The header is
//   appended at the end of Buffers[1].
var auHeader = (dataLength - 7) << 3;
auHeader = auHeader | (byte)(_audioData.Buffers[1].Length/2);
Array.Resize(ref _audioData.Buffers[1], _audioData.Buffers[1].Length + 2);
_audioData.Buffers[1].Write(_audioData.Buffers[1].Length-2,(ushort)auHeader);
//4. Stage the raw payload, skipping the same 7 leading bytes, then restore
//   the stream position for the caller. No check that dataLength > 7 —
//   NOTE(review): a frame shorter than 8 bytes would underflow here.
pData.Position += 7;
pData.CopyDataTo(_audioBuffer,(int) (dataLength-7));
pData.Position -= 7;
return true;
}