I'm trying to create the engine and output mix:
// create engine
this->res = slCreateEngine(&this->engineObject, 0, NULL, 0, NULL, NULL);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Create Engine.");
this->Free();
return;
}
this->res = (*this->engineObject)->Realize(this->engineObject, SL_BOOLEAN_FALSE);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Realize Engine.");
this->Free();
return;
}
this->res = (*this->engineObject)->GetInterface(this->engineObject, SL_IID_ENGINE, &this->engineEngine);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't GetInterface Engine.");
this->Free();
return;
}
// create output mix
this->res = (*this->engineEngine)->CreateOutputMix(this->engineEngine, &this->outputmixObject, 0, NULL, NULL);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Create Output Mix.");
this->Free();
return;
}
this->res = (*this->outputmixObject)->Realize(this->outputmixObject, SL_BOOLEAN_FALSE);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Realize Output Mix.");
this->Free();
return;
}
That works. Next I start playing an MP3 stream from IceCast
(for example "http://example.com/stream320"):
SLDataLocator_URI loc_uri = {SL_DATALOCATOR_URI, filename};
SLDataFormat_MIME format_mime = {SL_DATAFORMAT_MIME, (SLchar*)NULL, SL_CONTAINERTYPE_UNSPECIFIED};
SLDataSource audioSrcuri = {&loc_uri, &format_mime};
SLDataLocator_OutputMix dataLocatorOut = {SL_DATALOCATOR_OUTPUTMIX,this->outputmixObject};
SLDataSink audioSnk = {&dataLocatorOut, NULL};
const SLInterfaceID pIDs[3] = {SL_IID_PLAY, SL_IID_SEEK, SL_IID_PREFETCHSTATUS};
const SLboolean pIDsRequired[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
this->res = (*this->engineEngine)->CreateAudioPlayer(this->engineEngine, &this->player, &audioSrcuri, &audioSnk, 3, pIDs, pIDsRequired);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Create Audio Player.");
return;
}
this->res = (*this->player)->Realize(this->player, SL_BOOLEAN_FALSE);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Realize Audio Player.");
return;
}
this->res = (*this->player)->GetInterface(this->player, SL_IID_PLAY, &this->playerInterface);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Get Interface \"SL_IID_PLAY\" from Audio Player.");
return;
}
this->res = (*this->player)->GetInterface(this->player, SL_IID_SEEK, &this->seekInterface);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Get Interface \"SL_IID_SEEK\" from Audio Player.");
return;
}
this->res = (*this->player)->GetInterface(this->player, SL_IID_PREFETCHSTATUS, &this->prefetchInterface);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Get Interface \"SL_IID_PREFETCHSTATUS\" from Audio Player.");
return;
}
this->res = (*this->playerInterface)->SetPlayState(this->playerInterface, SL_PLAYSTATE_PAUSED);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Set Play State \"SL_PLAYSTATE_PAUSED\".");
return;
}
this->LastPlayState = SL_PLAYSTATE_PAUSED;
/*
SLuint32 prefetchStatus = SL_PREFETCHSTATUS_UNDERFLOW;
while (prefetchStatus != SL_PREFETCHSTATUS_SUFFICIENTDATA)
{
LOGI("Wait until there's data to play...");
usleep(1 * 1000);
(*this->prefetchInterface)->GetPrefetchStatus(this->prefetchInterface, &prefetchStatus);
}
LOGI("Data OK.");
*/
/* Get duration */
SLmillisecond durationInMsec = SL_TIME_UNKNOWN;
this->res = (*this->playerInterface)->GetDuration(this->playerInterface, &durationInMsec);
if (SL_RESULT_SUCCESS != this->res)
{
LOGI("Can't Get Duration.");
return;
}
if (durationInMsec == SL_TIME_UNKNOWN) {
LOGI("durationInMsec = SL_TIME_UNKNOWN");
//durationInMsec = 5000;
}
(*this->seekInterface)->SetLoop(this->seekInterface,SL_BOOLEAN_FALSE,0,SL_TIME_UNKNOWN);
(Here, filename is "http://example.com/stream320".)
LogCat began printing error messages:
E/libOpenSLES(16432): MEDIA_BUFFERING_UPDATE -491520000% < 0
E/libOpenSLES(16432): MEDIA_BUFFERING_UPDATE -655360000% < 0
E/libOpenSLES(16432): MEDIA_BUFFERING_UPDATE -819200000% < 0
...
but the stream plays!
OK, so I try to stop playback:
this->res = (*this->playerInterface)->SetPlayState(this->playerInterface, SL_PLAYSTATE_STOPPED);
if (SL_RESULT_SUCCESS != this->res)
LOGI("Can't Set Play State \"SL_PLAYSTATE_STOPPED\".");
OK! Playback really does stop, but the player keeps downloading the stream.
Why?
So I try to destroy the player object:
(*this->player)->Destroy(this->player);
LogCat printed an error:
A//system/bin/app_process(16690): stack corruption detected: aborted
and the app process terminated.
What is wrong?
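For reference, here is the teardown order the OpenSL ES spec expects, as a minimal sketch reusing the member names above. Destroying an object invalidates every interface obtained from it, and child objects (players) should be destroyed before the output mix and engine:

this->res = (*this->playerInterface)->SetPlayState(this->playerInterface, SL_PLAYSTATE_STOPPED);
(*this->player)->Destroy(this->player);   // also invalidates playerInterface, seekInterface, prefetchInterface
this->player = NULL;
this->playerInterface = NULL;
this->seekInterface = NULL;
this->prefetchInterface = NULL;
(*this->outputmixObject)->Destroy(this->outputmixObject);
this->outputmixObject = NULL;
(*this->engineObject)->Destroy(this->engineObject);
this->engineObject = NULL;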
Related
I have a problem reading audio with the JavaCPP FFmpeg library. I don't know how to pass the decoded data to Java Sound, and I am not sure my code is correct.
Here is the most important part of my code (video works, so I omit it):
The variables :
//==========================================================================
// FFMpeg 4.x - Video and Audio
//==========================================================================
private final AVFormatContext pFormatCtx = new AVFormatContext(null);
private final AVDictionary OPTIONS_DICT = null;
private AVPacket pPacket = new AVPacket();
//==========================================================================
// FFMpeg 4.x - Audio
//==========================================================================
private AVCodec pAudioCodec;
private AVCodecContext pAudioCodecCtx;
private final List<StreamInfo> audioStreams = new ArrayList<>();
private int audio_data_size;
private final BytePointer audio_data = new BytePointer(0);
private int audio_ret;
private AVFrame pAudioDecodedFrame = null;
private AVCodecParserContext pAudioParser;
private SwrContext audio_swr_ctx = null;
Then I call the prepare functions in this order:
private void prepareFirst() throws Exception{
oldFile = file;
// Initialize packet and check for error
pPacket = av_packet_alloc();
if(pPacket == null){
throw new Exception("ALL: Couldn't allocate packet");
}
// Open video file
if (avformat_open_input(pFormatCtx, file.getPath(), null, null) != 0) {
throw new Exception("ALL: Couldn't open file");
}
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, (PointerPointer)null) < 0) {
throw new Exception("ALL: Couldn't find stream information");
}
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, file.getPath(), 0);
// Find the first audio/video stream
for (int i = 0; i < pFormatCtx.nb_streams(); i++) {
switch(pFormatCtx.streams(i).codecpar().codec_type()){
case AVMEDIA_TYPE_VIDEO -> videoStreams.add(new StreamInfo(i, pFormatCtx.streams(i)));
case AVMEDIA_TYPE_AUDIO -> audioStreams.add(new StreamInfo(i, pFormatCtx.streams(i)));
}
}
if(videoStreams.isEmpty() && type != PlayType.AudioOnly){
throw new Exception("Didn't find a video stream");
}
if(audioStreams.isEmpty() && type != PlayType.VideoOnly){
throw new Exception("Didn't find an audio stream");
}
}
private void prepareAudio() throws Exception{
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// AUDIO
//------------------------------------------------------------------
if(audioStreams.isEmpty() == false){
//===========================
//------------
// // Let's search for AVCodec
// pAudioCodec = avcodec_find_decoder(pFormatCtx.streams(audioStreams.get(0).getStreamIndex()).codecpar().codec_id());
// if (pAudioCodec == null) {
// throw new Exception("AUDIO: Unsupported codec or not found!");
// }
//
// // Let's alloc AVCodecContext
// pAudioCodecCtx = avcodec_alloc_context3(pAudioCodec);
// if (pAudioCodecCtx == null) {
// throw new Exception("AUDIO: Unallocated codec context or not found!");
// }
// Get a pointer to the codec context for the audio stream
pAudioCodecCtx = pFormatCtx.streams(audioStreams.get(0).getStreamIndex()).codec();
// Find the decoder for the audio stream
pAudioCodec = avcodec_find_decoder(pAudioCodecCtx.codec_id());
if (pAudioCodec == null) {
throw new Exception("AUDIO: Unsupported codec or not found!");
}
//===========================
//------------
/* open it */
if (avcodec_open2(pAudioCodecCtx, pAudioCodec, OPTIONS_DICT) < 0) {
throw new Exception("AUDIO: Could not open codec");
}
pAudioDecodedFrame = av_frame_alloc();
if (pAudioDecodedFrame == null){
throw new Exception("AUDIO: DecodedFrame allocation failed");
}
audio_swr_ctx = swr_alloc_set_opts(
null, // existing Swr context or NULL
AV_CH_LAYOUT_STEREO, // output channel layout (AV_CH_LAYOUT_*)
AV_SAMPLE_FMT_S16, // output sample format (AV_SAMPLE_FMT_*).
44100, // output sample rate (frequency in Hz)
pAudioCodecCtx.channel_layout(), // input channel layout (a layout mask, not a channel count)
pAudioCodecCtx.sample_fmt(), // input sample format (AV_SAMPLE_FMT_*).
pAudioCodecCtx.sample_rate(), // input sample rate (frequency in Hz)
0, // logging level offset
null // parent logging context, can be NULL
);
swr_init(audio_swr_ctx);
av_samples_fill_arrays(
pAudioDecodedFrame.data(), // audio_data,
pAudioDecodedFrame.linesize(), // linesize
audio_data, // buf
2, // nb_channels (stereo), not the AV_CH_LAYOUT_* mask
44100, // nb_samples
AV_SAMPLE_FMT_S16, // sample_fmt
0 // align
);
}
// Audio treatment end ---------------------------------------------
//==================================================================
}
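As an aside, AVStream.codec (used above) is deprecated in FFmpeg 4.x; the commented-out block is close to the replacement, which only additionally needs an avcodec_parameters_to_context() copy. A sketch in FFmpeg's native C API, error handling elided:

AVStream *st = fmt_ctx->streams[audio_index];
AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id);
AVCodecContext *dec_ctx = avcodec_alloc_context3(dec);
avcodec_parameters_to_context(dec_ctx, st->codecpar);  /* copy the stream's codec parameters */
avcodec_open2(dec_ctx, dec, NULL);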
Then, when I launch the thread:
private void doPlay() throws Exception{
av_init_packet(pPacket);
// Read frames
while (av_read_frame(pFormatCtx, pPacket) >= 0) {
if (type != PlayType.AudioOnly && pPacket.stream_index() == videoStreams.get(0).getStreamIndex()) {
// Is this a packet from the video stream?
decodeVideo();
renewPacket();
}
if (type != PlayType.VideoOnly && pPacket.stream_index() == audioStreams.get(0).getStreamIndex()) {
// Is this a packet from the audio stream?
if(pPacket.size() > 0){
decodeAudio();
renewPacket();
}
}
}
}
private void renewPacket(){
// Free the packet that was allocated by av_read_frame
av_packet_unref(pPacket);
pPacket.data(null);
pPacket.size(0);
av_init_packet(pPacket);
}
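A side note on renewPacket(): when the packet comes from av_packet_alloc(), av_packet_unref() alone returns it to its initial state, so the extra resets and the deprecated av_init_packet() are unnecessary. A minimal read loop in FFmpeg's C API (names assumed):

while (av_read_frame(fmt_ctx, pkt) >= 0) {
    /* ... route the packet to the video or audio decoder here ... */
    av_packet_unref(pkt);  /* frees the data and resets every field */
}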
And again, this is the part where the audio is not played:
private void decodeAudio() throws Exception{
do {
audio_ret = avcodec_send_packet(pAudioCodecCtx, pPacket);
} while(audio_ret == AVERROR_EAGAIN());
System.out.println("packet sent return value: " + audio_ret);
if(audio_ret == AVERROR_EOF || audio_ret == AVERROR_EINVAL()) {
StringBuilder sb = new StringBuilder();
Formatter formatter = new Formatter(sb, Locale.US);
formatter.format("AVERROR(EAGAIN): %d, AVERROR_EOF: %d, AVERROR(EINVAL): %d\n", AVERROR_EAGAIN(), AVERROR_EOF, AVERROR_EINVAL());
formatter.format("Audio frame getting error (%d)!\n", audio_ret);
throw new Exception(sb.toString());
}
audio_ret = avcodec_receive_frame(pAudioCodecCtx, pAudioDecodedFrame);
System.out.println("frame received return value: " + audio_ret);
audio_data_size = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
if (audio_data_size < 0) {
/* This should not occur, checking just for paranoia */
throw new Exception("Failed to calculate data size");
}
double frame_nb = 44100d / pAudioCodecCtx.sample_rate() * pAudioDecodedFrame.nb_samples();
long out_count = Math.round(Math.floor(frame_nb));
int out_samples = swr_convert(
audio_swr_ctx,
audio_data,
(int)out_count,
pAudioDecodedFrame.data(0),
pAudioDecodedFrame.nb_samples()
);
if (out_samples < 0) {
throw new Exception("AUDIO: Error while converting");
}
int dst_bufsize = av_samples_get_buffer_size(
pAudioDecodedFrame.linesize(),
(int)AV_CH_LAYOUT_STEREO,
out_samples,
AV_SAMPLE_FMT_S16,
1
);
AudioFormat audioFormat = new AudioFormat(
pAudioDecodedFrame.sample_rate(),
16,
2,
true,
false
);
BytePointer bytePointer = pAudioDecodedFrame.data(0);
ByteBuffer byteBuffer = bytePointer.asBuffer();
byte[] bytes = new byte[byteBuffer.remaining()];
byteBuffer.get(bytes);
try (SourceDataLine sdl = AudioSystem.getSourceDataLine(audioFormat)) {
sdl.open(audioFormat);
sdl.start();
sdl.write(bytes, 0, bytes.length);
sdl.drain();
sdl.stop();
} catch (LineUnavailableException ex) {
Logger.getLogger(AVEntry.class.getName()).log(Level.SEVERE, null, ex);
}
}
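For comparison, the usual decode/resample pattern in FFmpeg's native C API: one avcodec_send_packet() can yield several frames, each frame is converted into a separately allocated interleaved S16 buffer, and that buffer (not the frame's own planes) goes to an audio line that stays open across frames. A sketch under those assumptions, with dec_ctx, swr, frame, and pkt set up as earlier and error handling elided:

int ret = avcodec_send_packet(dec_ctx, pkt);
while (ret >= 0) {
    ret = avcodec_receive_frame(dec_ctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                                   /* need more input / end of stream */
    uint8_t *out = NULL;
    int out_samples = (int)av_rescale_rnd(
        swr_get_delay(swr, dec_ctx->sample_rate) + frame->nb_samples,
        44100, dec_ctx->sample_rate, AV_ROUND_UP);
    av_samples_alloc(&out, NULL, 2, out_samples, AV_SAMPLE_FMT_S16, 0);
    out_samples = swr_convert(swr, &out, out_samples,
                              (const uint8_t **)frame->data, frame->nb_samples);
    /* ... write 'out' (interleaved S16 stereo) to the already-open audio sink ... */
    av_freep(&out);
}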
Do you have any ideas?
I followed this NAudio demo, modified to play SHOUTcast.
In my full code I have to resample the incoming audio and stream it again over the network to a network player. Since I was getting many clicks and pops, I went back to the demo code and found that these artifacts originate after the decoding block.
If I save the incoming stream in MP3 format, it sounds perfectly clear.
When I save the raw decoded data (with no processing other than the decoder), I get many audio artifacts.
I wonder whether I am making a mistake, even though my code is almost identical to the NAudio demo.
Here is the function from the example, as modified by me to save the raw data. It is called on a new thread.
private void StreamMP3(object state)
{
//Configuration config = ConfigurationManager.OpenExeConfiguration(ConfigurationUserLevel.None);
//SettingsSection section = (SettingsSection)config.GetSection("system.net/settings");
this.fullyDownloaded = false;
string url = "http://icestreaming.rai.it/5.mp3";//(string)state;
webRequest = (HttpWebRequest)WebRequest.Create(url);
int metaInt = 0; // blocksize of mp3 data
int framesize = 0;
webRequest.Headers.Clear();
webRequest.Headers.Add("GET", "/ HTTP/1.0");
// needed to receive metadata information
webRequest.Headers.Add("Icy-MetaData", "1");
webRequest.UserAgent = "WinampMPEG/5.09";
HttpWebResponse resp = null;
try
{
resp = (HttpWebResponse)webRequest.GetResponse();
}
catch (WebException e)
{
if (e.Status != WebExceptionStatus.RequestCanceled)
{
ShowError(e.Message);
}
return;
}
byte[] buffer = new byte[16384 * 4]; // needs to be big enough to hold a decompressed frame
try
{
// read blocksize to find metadata block
metaInt = Convert.ToInt32(resp.GetResponseHeader("icy-metaint"));
}
catch
{
}
IMp3FrameDecompressor decompressor = null;
byteOut = createNewFile(destPath, "salva", "raw");
try
{
using (var responseStream = resp.GetResponseStream())
{
var readFullyStream = new ReadFullyStream(responseStream);
readFullyStream.metaInt = metaInt;
do
{
if (mybufferedWaveProvider != null && mybufferedWaveProvider.BufferLength - mybufferedWaveProvider.BufferedBytes < mybufferedWaveProvider.WaveFormat.AverageBytesPerSecond / 4)
{
Debug.WriteLine("Buffer getting full, taking a break");
Thread.Sleep(500);
}
else
{
Mp3Frame frame = null;
try
{
frame = Mp3Frame.LoadFromStream(readFullyStream, true);
if (metaInt > 0)
UpdateSongName(readFullyStream.SongName);
else
UpdateSongName("No Song Info in Stream...");
}
catch (EndOfStreamException)
{
this.fullyDownloaded = true;
// reached the end of the MP3 file / stream
break;
}
catch (WebException)
{
// probably we have aborted download from the GUI thread
break;
}
if (decompressor == null)
{
// don't think these details matter too much - just help ACM select the right codec
// however, the buffered provider doesn't know what sample rate it is working at
// until we have a frame
WaveFormat waveFormat = new Mp3WaveFormat(frame.SampleRate, frame.ChannelMode == ChannelMode.Mono ? 1 : 2, frame.FrameLength, frame.BitRate);
decompressor = new AcmMp3FrameDecompressor(waveFormat);
this.mybufferedWaveProvider = new BufferedWaveProvider(decompressor.OutputFormat);
this.mybufferedWaveProvider.BufferDuration = TimeSpan.FromSeconds(200); // allow us to get well ahead of ourselves
framesize = (decompressor.OutputFormat.Channels * decompressor.OutputFormat.SampleRate * (decompressor.OutputFormat.BitsPerSample / 8) * 20) / 1000;
//this.bufferedWaveProvider.BufferedDuration = 250;
}
int decompressed = decompressor.DecompressFrame(frame, buffer, 0);
//Debug.WriteLine(String.Format("Decompressed a frame {0}", decompressed));
mybufferedWaveProvider.AddSamples(buffer, 0, decompressed);
while (mybufferedWaveProvider.BufferedDuration.Milliseconds >= 20)
{
byte[] read = new byte[framesize];
mybufferedWaveProvider.Read(read, 0, framesize);
byteOut.Write(read, 0, framesize);
}
}
} while (playbackState != StreamingPlaybackState.Stopped);
Debug.WriteLine("Exiting");
// was doing this in a finally block, but for some reason
// we are hanging on response stream .Dispose so never get there
decompressor.Dispose();
}
}
finally
{
if (decompressor != null)
{
decompressor.Dispose();
}
}
}
OK, I found the problem: I was including the SHOUTcast metadata in the MP3 frames.
See the comment "HERE I COLLECT THE BYTES OF THE MP3 FRAME" for the correct point at which to collect the MP3 frame bytes with no streaming metadata mixed in.
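In other words, the server interleaves audio and metadata: icy-metaint audio bytes, then one length byte L, then L*16 bytes of metadata, repeating. Only the audio bytes may reach the MP3 frame parser. A compact sketch of that framing in C, with FILE* standing in for the network and output streams:

#include <stdio.h>
#include <stdlib.h>

/* Demultiplex an ICY stream: copy the audio bytes, skip the metadata. */
void demux_icy(FILE *net, FILE *mp3, int meta_int)
{
    char *audio = malloc(meta_int);
    while (fread(audio, 1, meta_int, net) == (size_t)meta_int) {
        fwrite(audio, 1, meta_int, mp3);   /* pure MP3 frame bytes */
        int len = fgetc(net);              /* metadata length byte */
        if (len < 0)
            break;                         /* end of stream */
        for (len *= 16; len > 0; len--)    /* skip "StreamTitle='...';" */
            fgetc(net);
    }
    free(audio);
}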
The following code runs without audio artifacts:
private void SHOUTcastReceiverThread()
{
//-*- String server = "http://216.235.80.18:8285/stream";
//String serverPath = "/";
//String destPath = "C:\\temp\\"; // destination path for saved songs
HttpWebRequest request = null; // web request
HttpWebResponse response = null; // web response
int metaInt = 0; // blocksize of mp3 data
int count = 0; // byte counter
int metadataLength = 0; // length of metadata header
string metadataHeader = ""; // metadata header that contains the actual songtitle
string oldMetadataHeader = null; // previous metadata header, to compare with new header and find next song
//CircularQueueStream framestream = new CircularQueueStream(2048);
QueueStream framestream = new QueueStream();
framestream.Position = 0;
bool bNewSong = false;
byte[] buffer = new byte[512]; // receive buffer
byte[] dec_buffer = new byte[decSIZE];
Mp3Frame frame;
IMp3FrameDecompressor decompressor = null;
Stream socketStream = null; // input stream on the web request
// create web request
request = (HttpWebRequest)WebRequest.Create(server);
// clear old request header and build own header to receive ICY-metadata
request.Headers.Clear();
request.Headers.Add("GET", serverPath + " HTTP/1.0");
request.Headers.Add("Icy-MetaData", "1"); // needed to receive metadata informations
request.UserAgent = "WinampMPEG/5.09";
// execute request
try
{
response = (HttpWebResponse)request.GetResponse();
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
return;
}
// read blocksize to find metadata header
metaInt = Convert.ToInt32(response.GetResponseHeader("icy-metaint"));
try
{
// open stream on response
socketStream = response.GetResponseStream();
var readFullyStream = new ReadFullyStream(socketStream);
frame = null;
// rip stream in an endless loop
do
{
if (IsBufferNearlyFull)
{
Debug.WriteLine("Buffer getting full, taking a break");
Thread.Sleep(500);
frame = null;
}
else
{
int bufLen = readFullyStream.Read(buffer, 0, buffer.Length);
try
{
if (framestream.CanRead && framestream.Length > 512)
frame = Mp3Frame.LoadFromStream(framestream);
else
frame = null;
}
catch (Exception ex)
{
frame = null;
}
if (bufLen < 0)
{
Debug.WriteLine("Buffer error 1: exit.");
return;
}
// processing RAW data
for (int i = 0; i < bufLen; i++)
{
// if there is a metadata block, 'metadataLength' was set to a value != 0; collect its bytes into a string
if (metadataLength != 0)
{
metadataHeader += Convert.ToChar(buffer[i]);
metadataLength--;
if (metadataLength == 0) // all metadata informations were written to the 'metadataHeader' string
{
string fileName = "";
string fileNameRaw = "";
// if songtitle changes, create a new file
if (!metadataHeader.Equals(oldMetadataHeader))
{
// flush and close old byteOut stream
if (byteOut != null)
{
byteOut.Flush();
byteOut.Close();
byteOut = null;
}
if (byteOutRaw != null)
{
byteOutRaw.Flush();
byteOutRaw.Close();
byteOutRaw = null;
}
timeStart = timeEnd;
// extract songtitle from metadata header. Trim was needed, because some stations don't trim the songtitle
//fileName = Regex.Match(metadataHeader, "(StreamTitle=')(.*)(';StreamUrl)").Groups[2].Value.Trim();
fileName = Regex.Match(metadataHeader, "(StreamTitle=')(.*)(';)").Groups[2].Value.Trim();
// write new songtitle to console for information
if (fileName.Length == 0)
fileName = "shoutcast_test";
fileNameRaw = fileName + "_raw";
framestream.reSetPosition();
SongChanged(this, metadataHeader);
bNewSong = true;
// create new file with the songtitle from header and set a stream on this file
timeEnd = DateTime.Now;
if (bWrite_to_file)
{
byteOut = createNewFile(destPath, fileName, "mp3");
byteOutRaw = createNewFile(destPath, fileNameRaw, "raw");
}
timediff = timeEnd - timeStart;
// save new header to 'oldMetadataHeader' string, to compare if there's a new song starting
oldMetadataHeader = metadataHeader;
}
metadataHeader = "";
}
}
else // write mp3 data to file or extract metadata headerlength
{
if (count++ < metaInt) // write bytes to filestream
{
//HERE I COLLECT THE BYTES OF THE MP3 FRAME
framestream.Write(buffer, i, 1);
}
else // get headerlength from lengthbyte and multiply by 16 to get correct headerlength
{
metadataLength = Convert.ToInt32(buffer[i]) * 16;
count = 0;
}
}
}//for
if (bNewSong)
{
decompressor = createDecompressor(frame);
bNewSong = false;
}
if (frame != null && decompressor != null)
{
framedec(decompressor, frame);
}
// end of RAW data processing
}//Buffer is not full
SHOUTcastStatusProcess();
} while (playbackState != StreamingPlaybackState.Stopped);
} //try
catch (Exception ex)
{
Console.WriteLine(ex.Message);
}
finally
{
if (byteOut != null)
byteOut.Close();
if (socketStream != null)
socketStream.Close();
if (decompressor != null)
{
decompressor.Dispose();
decompressor = null;
}
if (null != request)
request.Abort();
if (null != framestream)
framestream.Dispose();
if (null != bufferedWaveProvider)
bufferedWaveProvider.ClearBuffer();
//if (null != bufferedWaveProviderOut)
// bufferedWaveProviderOut.ClearBuffer();
if (null != mono16bitFsinStream)
{
mono16bitFsinStream.Close();
mono16bitFsinStream.Dispose();
}
if (null != middleStream2)
{
middleStream2.Close();
middleStream2.Dispose();
}
if (null != resampler)
resampler.Dispose();
}
}
public class QueueStream : MemoryStream
{
long ReadPosition = 0;
long WritePosition = 0;
public QueueStream() : base() { }
public override int Read(byte[] buffer, int offset, int count)
{
Position = ReadPosition;
var temp = base.Read(buffer, offset, count);
ReadPosition = Position;
return temp;
}
public override void Write(byte[] buffer, int offset, int count)
{
Position = WritePosition;
base.Write(buffer, offset, count);
WritePosition = Position;
}
public void reSetPosition()
{
WritePosition = 0;
ReadPosition = 0;
Position = 0;
}
}
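A design note on QueueStream: consumed bytes are never trimmed, so the backing MemoryStream keeps growing until reSetPosition() rewinds it at a song change; the commented-out CircularQueueStream hints at a bounded alternative if memory use matters.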
private void framedec(IMp3FrameDecompressor decompressor, Mp3Frame frame)
{
int Ndecoded_samples = 0;
byte[] dec_buffer = new byte[decSIZE];
Ndecoded_samples = decompressor.DecompressFrame(frame, dec_buffer, 0);
bufferedWaveProvider.AddSamples(dec_buffer, 0, Ndecoded_samples);
NBufferedSamples += Ndecoded_samples;
brcnt_in.incSamples(Ndecoded_samples);
if (Ndecoded_samples > decSIZE)
{
Debug.WriteLine(String.Format("Too many samples {0}", Ndecoded_samples));
}
if (byteOut != null)
byteOut.Write(frame.RawData, 0, frame.RawData.Length);
if (byteOutRaw != null) // as long as we don't have a songtitle, we don't open a new file and don't write any bytes
byteOutRaw.Write(dec_buffer, 0, Ndecoded_samples);
frame = null;
}
private IMp3FrameDecompressor createDecompressor(Mp3Frame frame)
{
IMp3FrameDecompressor dec = null;
if (frame != null)
{
// don't think these details matter too much - just help ACM select the right codec
// however, the buffered provider doesn't know what sample rate it is working at
// until we have a frame
WaveFormat srcwaveFormat = new Mp3WaveFormat(frame.SampleRate, frame.ChannelMode == ChannelMode.Mono ? 1 : 2, frame.FrameLength, frame.BitRate);
dec = new AcmMp3FrameDecompressor(srcwaveFormat);
bufferedWaveProvider = new BufferedWaveProvider(dec.OutputFormat);// decompressor.OutputFormat
bufferedWaveProvider.BufferDuration = TimeSpan.FromSeconds(400); // allow us to get well ahead of ourselves
// ------------------------------------------------
//Create an intermediate format with same sampling rate, 16 bit, mono
middlewavformat = new WaveFormat(dec.OutputFormat.SampleRate, 16, 1);
outwavFormat = new WaveFormat(Fs_out, 16, 1);
// wave16ToFloat = new Wave16ToFloatProvider(provider); // I have tried with and without this converter.
wpws = new WaveProviderToWaveStream(bufferedWaveProvider);
//Check middlewavformat.Encoding == WaveFormatEncoding.Pcm;
mono16bitFsinStream = new WaveFormatConversionStream(middlewavformat, wpws);
middleStream2 = new BlockAlignReductionStream(mono16bitFsinStream);
resampler = new MediaFoundationResampler(middleStream2, outwavFormat);
}
return dec;
}
The following code works with API 21 and above; however, when I run it below API 21, no data shows up on the TextureView.
My MediaCodec configuration:
MediaFormat format = MediaFormat.createVideoFormat(MediaFormat.MIMETYPE_VIDEO_AVC, 1920, 1080);
format.setByteBuffer("csd-0", ByteBuffer.allocate(100));
format.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 100000);
try {
m_codec = MediaCodec.createDecoderByType(MediaFormat.MIMETYPE_VIDEO_AVC);
m_codec.configure(format, new Surface(m_surface.getSurfaceTexture()), null, 0);
m_codec.start();
} catch (Exception e) {
e.printStackTrace();
}
The decoding part:
int inputIndex = m_codec.dequeueInputBuffer(-1);
if (inputIndex >= 0) {
ByteBuffer buffer;
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
buffer = m_codec.getInputBuffer(inputIndex);
buffer.clear();
}
else {
buffer = m_codec.getInputBuffers()[inputIndex];
buffer.put(videoBuffer, 0, info.size);
}
if (buffer != null) {
buffer.put(videoBuffer, info.offset,videoBuffer.length);
m_codec.queueInputBuffer(inputIndex, 0, videoBuffer.length, 0, 0);
}
}
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
int outputIndex = m_codec.dequeueOutputBuffer(info, 0);
if (outputIndex >= 0) {
m_codec.releaseOutputBuffer(outputIndex, true);
}
EDIT:
I found the solution: when I removed the line below, it started working.
format.setByteBuffer("csd-0", ByteBuffer.allocate(100));
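That makes sense: for AVC, MediaCodec expects "csd-0" to carry real codec-specific data (the SPS, with the PPS in "csd-1"), so a zero-filled 100-byte buffer is invalid, while omitting the key entirely lets the decoder pick the parameter sets out of the stream itself.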
My application plays a radio stream that uses SHOUTcast streaming. I have searched the internet for two days, but my application still can't play this URL, and I don't know how to solve it.
This is my sample source code:
String url1 = "http://203.150.224.142:8003/";
HttpConnection con= (HttpConnection)Connector.open(url1);
InputStream is = con.openInputStream();
player = Manager.createPlayer(is, "audio/mpeg");
player.realize();
player.prefetch();
player.start();
I have used this code in production; it should work for you as well.
HttpConnection conn = (HttpConnection) Connector.open(music.getTrack_url() + "?streamable=true", Connector.READ_WRITE);
if (conn.getResponseCode() == HttpConnection.HTTP_OK) {
is = conn.openInputStream();
player = Manager.createPlayer(is, "audio/mp3");
player.addPlayerListener(thisObj);
player.realize();
player.prefetch();
player.start();
}
Try this:
public void loadShoutcast(String url)
{
StreamConnection connection = null;
int BUFFER_SIZE = 1024;
DataOutputStream dos_ = null;
OutputStream os_ = null;
try
{
System.out.println("opening connection " + url);
//Shoutcast URL
connection = (StreamConnection) Connector.open(url);
os_ = connection.openOutputStream();
dos_ = new DataOutputStream(os_);
// send the HTTP request
String req = "GET / HTTP/1.1\r\nUser-Agent: Profile/MIDP-1.0 Configuration/CLDC-1.0\r\n\r\n";
dos_.write(req.getBytes());
is = null;
is = connection.openInputStream();
long byteDone = 0;
int byteCount;
byte[] buffer = new byte[BUFFER_SIZE];
//Connection to the file where you want to save the SHOUTcast content
//It can be skipped if you don't want to save the contents
out = tempFilecon.openDataOutputStream();
System.out.println("starting download");
while ((byteCount = is.read(buffer)) >= 0)
{
out.write(buffer, 0, byteCount);
byteDone += byteCount;
done += byteCount;
}
return;
}
catch (InterruptedIOException e)
{
System.out.println("InterruptedIOException 1" + e.getMessage());
return;
}
catch (Exception e)
{
System.out.println("ERROR - 51 " + e.getMessage());
return;
}
finally
{
if (dos_ != null)
{
dos_.close();
dos_ = null;
}
if (os_ != null)
{
os_.close();
os_ = null;
}
if (is != null)
{
is.close();
is = null;
}
if (out != null)
{
out.close();
out = null;
}
System.out.println("closing connection");
if (connection != null)
{
connection.close();
connection = null;
}
}
// return false;
}
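For what it's worth, the raw StreamConnection approach tends to work where HttpConnection fails because many SHOUTcast servers answer with a non-standard status line ("ICY 200 OK" instead of "HTTP/1.x 200 OK"), which strict HTTP clients reject; writing the GET request by hand sidesteps that.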
I have created a multichannel audio system in OpenSL ES on the Android NDK utilizing PCM buffer queues. I cannot seem to get the OS to support SL_IID_RATEPITCH and SL_IID_VOLUME despite the Android docs saying that these two interfaces are supported. Below is my initialization code. Am I doing something wrong?
static SLresult InitChannel(int i)
{
SLresult lRes;
OpenSLChannel *channel = &sndc[i];
// Initialize stuff for playing PCM channels
// Set-up sound audio source.
SLDataLocator_AndroidSimpleBufferQueue lDataLocatorIn;
lDataLocatorIn.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
// At most one buffer in the queue.
lDataLocatorIn.numBuffers = 1;
SLDataFormat_PCM lDataFormat;
lDataFormat.formatType = SL_DATAFORMAT_PCM;
lDataFormat.numChannels = 1; // Mono sound.
lDataFormat.samplesPerSec = SL_SAMPLINGRATE_22_05; // BASE_FREQUENCY
lDataFormat.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
lDataFormat.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
lDataFormat.channelMask = SL_SPEAKER_FRONT_CENTER;
lDataFormat.endianness = SL_BYTEORDER_LITTLEENDIAN;
SLDataSource lDataSource;
lDataSource.pLocator = &lDataLocatorIn;
lDataSource.pFormat = &lDataFormat;
SLDataLocator_OutputMix lDataLocatorOut;
lDataLocatorOut.locatorType = SL_DATALOCATOR_OUTPUTMIX;
lDataLocatorOut.outputMix = mOutputMixObj;
SLDataSink lDataSink;
lDataSink.pLocator = &lDataLocatorOut;
lDataSink.pFormat = NULL;
// Create and realize the sound player.
// We are going to need its SL_IID_PLAY and also SL_IID_BUFFERQUEUE interface
// now available thanks to the data locator configured in the previous step.
const SLuint32 lSoundPlayerIIDCount = 4;
const SLInterfaceID lSoundPlayerIIDs[] = { SL_IID_PLAY, SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_RATEPITCH };
const SLboolean lSoundPlayerReqs[] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE };
lRes = (*mEngine)->CreateAudioPlayer(mEngine, &channel->mPlayerObj, &lDataSource, &lDataSink, lSoundPlayerIIDCount, lSoundPlayerIIDs, lSoundPlayerReqs);
if (lRes != SL_RESULT_SUCCESS)
return lRes;
lRes = (*channel->mPlayerObj)->Realize(channel->mPlayerObj, SL_BOOLEAN_FALSE);
if (lRes != SL_RESULT_SUCCESS)
return lRes;
lRes = (*channel->mPlayerObj)->GetInterface(channel->mPlayerObj, SL_IID_PLAY, &channel->mPlayer);
if (lRes != SL_RESULT_SUCCESS)
return lRes;
lRes = (*channel->mPlayerObj)->GetInterface(channel->mPlayerObj, SL_IID_BUFFERQUEUE, &channel->mPlayerQueue);
if (lRes != SL_RESULT_SUCCESS)
return lRes;
// Get Volume Interface
lRes = (*channel->mPlayerObj)->GetInterface(channel->mPlayerObj, SL_IID_VOLUME, &channel->mVolume);
if (lRes != SL_RESULT_SUCCESS)
{
Err_Printf("Volume interface not supported.\n");
// return lRes;
}
lRes = (*channel->mPlayerObj)->GetInterface(channel->mPlayerObj, SL_IID_RATEPITCH, &channel->mRatePitch);
if (lRes != SL_RESULT_SUCCESS)
{
Err_Printf("RatePitch interface not supported.\n");
// return lRes;
}
lRes = (*channel->mPlayerQueue)->RegisterCallback(channel->mPlayerQueue, SoundFinished, channel);
// slCheckErrorWithStatus(lRes, "Problem registering player callback (Error %d).", lRes);
lRes = (*channel->mPlayer)->SetCallbackEventsMask(channel->mPlayer, SL_PLAYEVENT_HEADATEND);
// slCheckErrorWithStatus(lRes, "Problem registering player callback mask (Error %d).", lRes);
return lRes;
}
//
// SystemInit
//
// Initialization for
// the sound subsystem.
//
void SystemInit(void)
{
mEngineObj = NULL;
mEngine = NULL;
mOutputMixObj = NULL;
Err_Printf("Starting OpenSL ES...\n");
SLresult lRes;
const SLuint32 lEngineMixIIDCount = 1;
const SLInterfaceID lEngineMixIIDs[] = { SL_IID_ENGINE };
const SLboolean lEngineMixReqs[] = { SL_BOOLEAN_TRUE };
const SLuint32 lOutputMixIIDCount = 2;
const SLInterfaceID lOutputMixIIDs[] = { SL_IID_VOLUME, SL_IID_RATEPITCH };
const SLboolean lOutputMixReqs[] = { SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE };
lRes = slCreateEngine(&mEngineObj, 0, NULL, lEngineMixIIDCount, lEngineMixIIDs, lEngineMixReqs);
if (lRes != SL_RESULT_SUCCESS)
goto ERROR; // lolwut?
lRes = (*mEngineObj)->Realize(mEngineObj, SL_BOOLEAN_FALSE);
if (lRes != SL_RESULT_SUCCESS)
goto ERROR;
lRes = (*mEngineObj)->GetInterface(mEngineObj, SL_IID_ENGINE, &mEngine);
if (lRes != SL_RESULT_SUCCESS)
goto ERROR;
lRes = (*mEngine)->CreateOutputMix(mEngine, &mOutputMixObj, lOutputMixIIDCount, lOutputMixIIDs, lOutputMixReqs);
if (lRes != SL_RESULT_SUCCESS)
goto ERROR;
lRes = (*mOutputMixObj)->Realize(mOutputMixObj, SL_BOOLEAN_FALSE);
if (lRes != SL_RESULT_SUCCESS)
goto ERROR;
int i;
for (i = 0; i < NUMCHANNELS; i++)
{
lRes = InitChannel(i);
if (lRes != SL_RESULT_SUCCESS)
goto ERROR;
}
return;
ERROR:
Err_Printf("Error while starting OpenSL ES.");
SystemShutdown();
}
SL_IID_VOLUME is supported; however, it only allows attenuation (the maximum gain is 0 mB). A good approximation of OpenAL's 0.0-1.0 gain range is:
float attenuation = 1.0f / 1024.0f + gain * 1023.0f / 1024.0f;
float db = 3 * log10(attenuation) / log10(2);
SLmillibel setGain = (SLmillibel)(db * 1000);
SL_IID_RATEPITCH is not supported, but SL_IID_PLAYBACKRATE is. Unfortunately, it only allows setting the speed from 0.5 to 2.0 at this time.
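For completeness, a minimal sketch of using SL_IID_PLAYBACKRATE instead. As with the other interfaces, it must be requested in the CreateAudioPlayer interface list; the object and field names below follow the code above:

SLPlaybackRateItf ratePitch;
lRes = (*channel->mPlayerObj)->GetInterface(channel->mPlayerObj, SL_IID_PLAYBACKRATE, &ratePitch);
if (lRes == SL_RESULT_SUCCESS)
    (*ratePitch)->SetRate(ratePitch, (SLpermille)1500);  /* 1000 per mille = normal speed, so 1500 = 1.5x */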