How do I mute sound using C? Is there an ALSA function call?
Are there any other function calls/APIs that can MUTE the microphone?
I have written some code to do audio playback using ALSA, and I have noticed a delay before the sound starts playing. How do I reduce the delay or latency of sound playback?
#include <alsa/asoundlib.h>

void SetAlsaMasterMute()
{
    snd_mixer_t *handle;
    snd_mixer_selem_id_t *sid;
    const char *card = "default";
    const char *selem_name = "Master";

    /* Open and load the mixer for the default card. */
    snd_mixer_open(&handle, 0);
    snd_mixer_attach(handle, card);
    snd_mixer_selem_register(handle, NULL, NULL);
    snd_mixer_load(handle);

    /* Look up the "Master" simple element. */
    snd_mixer_selem_id_alloca(&sid);
    snd_mixer_selem_id_set_index(sid, 0);
    snd_mixer_selem_id_set_name(sid, selem_name);
    snd_mixer_elem_t *elem = snd_mixer_find_selem(handle, sid);

    /* Turn the playback switch off (0 = muted) on all channels. */
    if (elem && snd_mixer_selem_has_playback_switch(elem)) {
        snd_mixer_selem_set_playback_switch_all(elem, 0);
    }

    snd_mixer_close(handle);
}
References: the example this code is based on and the official ALSA API documentation.
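For the microphone side (the second question above), capture elements expose an analogous switch. The following is an untested sketch of mine, not from the original post; it assumes the capture element on the "default" card is named "Capture", which varies between drivers (check with amixer first).

#include <alsa/asoundlib.h>

/* Sketch: mute the capture (microphone) switch.
 * Assumption: the simple element is named "Capture" on the "default" card. */
void SetAlsaCaptureMute()
{
    snd_mixer_t *handle;
    snd_mixer_selem_id_t *sid;

    snd_mixer_open(&handle, 0);
    snd_mixer_attach(handle, "default");
    snd_mixer_selem_register(handle, NULL, NULL);
    snd_mixer_load(handle);

    snd_mixer_selem_id_alloca(&sid);
    snd_mixer_selem_id_set_index(sid, 0);
    snd_mixer_selem_id_set_name(sid, "Capture");
    snd_mixer_elem_t *elem = snd_mixer_find_selem(handle, sid);

    /* 0 mutes all capture channels; pass 1 to unmute again. */
    if (elem && snd_mixer_selem_has_capture_switch(elem)) {
        snd_mixer_selem_set_capture_switch_all(elem, 0);
    }

    snd_mixer_close(handle);
}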
I am trying to write random noise to a device and let my loop sleep once I have written enough data. My understanding is that each call to snd_pcm_writei writes 162 bytes (81 frames), which at an 8 kHz rate and 16-bit mono format should be about 10 ms of audio (81 frames / 8000 frames per second ≈ 10.1 ms). I have verified that ALSA does report 81 frames written.
I would expect that I can then sleep for a short time before waking up and pushing the next 10 ms worth of data. However, when I sleep for any amount - even a single millisecond - I start to get buffer underrun errors.
Obviously I have made an incorrect assumption somewhere. Can anyone point out what I may be missing? I have removed most error checking to shorten the code, but there are no errors initializing ALSA on my end. I would like to be able to push 10 ms of audio and sleep (even for 1 ms) before pushing the next 10 ms.
#include <alsa/asoundlib.h>
#include <spdlog/spdlog.h>
#include <unistd.h>

int main(int argc, char **argv) {
    snd_pcm_t* handle;
    snd_pcm_hw_params_t* hw;
    unsigned int rate = 8000;
    unsigned long periodSize = rate / 100; // one period every 10 ms

    int err = snd_pcm_open(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0);

    // Configure hardware parameters: interleaved, S16_LE, 8 kHz, mono.
    snd_pcm_hw_params_malloc(&hw);
    snd_pcm_hw_params_any(handle, hw);
    snd_pcm_hw_params_set_access(handle, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
    snd_pcm_hw_params_set_format(handle, hw, SND_PCM_FORMAT_S16_LE);
    snd_pcm_hw_params_set_rate(handle, hw, rate, 0);
    snd_pcm_hw_params_set_channels(handle, hw, 1);
    int dir = 1;
    snd_pcm_hw_params_set_period_size_near(handle, hw, &periodSize, &dir);
    snd_pcm_hw_params(handle, hw);

    // Query the period size/time the device actually accepted.
    snd_pcm_uframes_t frames;
    snd_pcm_hw_params_get_period_size(hw, &frames, &dir);
    int size = frames * 2; // two bytes per sample (S16, mono)
    char* buffer = (char*)malloc(size);
    unsigned int periodTime;
    snd_pcm_hw_params_get_period_time(hw, &periodTime, &dir);
    snd_pcm_hw_params_free(hw);
    snd_pcm_prepare(handle);

    // Fill one period's worth of random noise.
    char* randomNoise = new char[size];
    for (int i = 0; i < size; i++)
        randomNoise[i] = random() % 0xFF;

    while (true) {
        err = snd_pcm_writei(handle, randomNoise, size / 2);
        if (err > 0) {
            spdlog::info("Write {} frames", err);
        } else {
            spdlog::error("Error write {}\n", snd_strerror(err));
            snd_pcm_recover(handle, err, 0);
            continue;
        }
        usleep(1000); // <---- This is what causes the buffer underrun
    }
}
Try putting the following in /etc/pulse/daemon.conf:
default-fragments = 5
default-fragment-size-msec = 2
and then restart Linux.
What I don't understand is why you write a buffer of "size" bytes to the device while basing the timing estimate on the "periodSize" you requested. Write a buffer of "periodSize" frames to the device instead.
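As an untested sketch of that idea (my addition, reusing the variable names from the question): write exactly one period of frames per call, and ask ALSA for a playback buffer several periods deep so that a short sleep cannot drain it.

/* Sketch (untested). During setup, before the final snd_pcm_hw_params(handle, hw)
 * call, request a buffer that holds several periods: */
snd_pcm_uframes_t bufferSize = periodSize * 4;   /* roughly 40 ms at 8 kHz */
snd_pcm_hw_params_set_buffer_size_near(handle, hw, &bufferSize);

/* In the loop, write exactly one period (frames frames = frames * 2 bytes
 * for 16-bit mono) per iteration: */
while (1) {
    int n = snd_pcm_writei(handle, randomNoise, frames);
    if (n < 0)
        snd_pcm_recover(handle, n, 0);
    usleep(1000);   /* the extra buffered periods absorb the sleep */
}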
I'm trying to use I2S and the internal DAC to play WAV files from SPIFFS on a Heltec WiFi LoRa 32 V2, using the Arduino IDE.
I have an audio amp and an oscilloscope hooked up to DAC2 (pin 25) of the board and I'm not getting any signal. I've simplified the problem by generating a sine wave (as in the ESP-IDF examples). Here's the code:
#include <Streaming.h>
#include <driver/i2s.h>
#include "freertos/queue.h"

#define SAMPLE_RATE (22050)
#define SAMPLE_SIZE 4000
#define PI (3.14159265)

#define I2S_BCK_IO (GPIO_NUM_26)
#define I2S_WS_IO  (GPIO_NUM_25)
#define I2S_DO_IO  (GPIO_NUM_22)
#define I2S_DI_IO  (-1)

size_t i2s_bytes_write = 0;
static const int i2s_num = 0;
int sample_data[SAMPLE_SIZE];

i2s_config_t i2s_config = {
    .mode = (i2s_mode_t)(I2S_MODE_MASTER | I2S_MODE_TX | I2S_MODE_DAC_BUILT_IN), // Only TX
    .sample_rate = SAMPLE_RATE,
    .bits_per_sample = I2S_BITS_PER_SAMPLE_16BIT,
    .channel_format = I2S_CHANNEL_FMT_RIGHT_LEFT, // 2 channels
    .communication_format = (i2s_comm_format_t)I2S_COMM_FORMAT_I2S,
    .intr_alloc_flags = 0, // ESP_INTR_FLAG_LEVEL1
    .dma_buf_count = 8,
    .dma_buf_len = 64,
    .use_apll = false
};

i2s_pin_config_t pin_config = {
    .bck_io_num = I2S_BCK_IO,
    .ws_io_num = I2S_WS_IO,
    .data_out_num = I2S_DO_IO,
    .data_in_num = I2S_DI_IO // Not used
};

static void setup_sine_wave()
{
    unsigned int i;
    int sample_val;
    double sin_float;
    size_t i2s_bytes_write = 0;

    for (i = 0; i < SAMPLE_SIZE; i++)
    {
        sin_float = sin(i * PI / 180.0);
        sin_float *= 127;
        sample_val = (uint8_t)sin_float;
        sample_data[i] = sample_val;
        Serial << sample_data[i] << ",";
        delay(1);
    }
    Serial << endl << "Sine wave generation complete" << endl;
}

void setup() {
    pinMode(26, OUTPUT);
    Serial.begin(115200);
    i2s_driver_install(I2S_NUM_0, &i2s_config, 0, NULL);
    //i2s_set_pin(I2S_NUM_0, NULL);
    i2s_set_pin(I2S_NUM_0, &pin_config);
    i2s_set_dac_mode(I2S_DAC_CHANNEL_RIGHT_EN);
    i2s_set_sample_rates(I2S_NUM_0, 22050); // set sample rate
    setup_sine_wave();
    i2s_set_clk(I2S_NUM_0, SAMPLE_RATE, I2S_BITS_PER_SAMPLE_16BIT, I2S_CHANNEL_MONO);
    i2s_write(I2S_NUM_0, &sample_data, SAMPLE_SIZE, &i2s_bytes_write, 500);
    i2s_driver_uninstall(I2S_NUM_0); // stop & destroy i2s driver
}

void loop()
{
    i2s_driver_install(I2S_NUM_0, &i2s_config, 0, NULL);
    i2s_write(I2S_NUM_0, &sample_data, SAMPLE_SIZE, &i2s_bytes_write, 500);
    delay(100);
    i2s_driver_uninstall(I2S_NUM_0);
    delay(10);
}
The code uploads and runs OK but I still get no signal on pin 25. I also looked on pin 26 (DAC1) but that seems to be used by LoRa_IRQ. Can anyone help me out?
First of all, take a look at how you've set up your pins:
#define I2S_BCK_IO (GPIO_NUM_26)
#define I2S_WS_IO (GPIO_NUM_25)
#define I2S_DO_IO (GPIO_NUM_22)
#define I2S_DI_IO (-1)
According to the I2S specification, pin 26 will output the bit clock, pin 25 will output the word-select (left/right) signal, and pin 22 will output the serial data corresponding to the audio you're sending to your DAC.
.sample_rate = SAMPLE_RATE,
.bits_per_sample = I2S_BITS_PER_SAMPLE_16BIT,
Now you've set the sample rate to 22050 Hz and the bit depth to 16 bits. So the word-select signal on pin 25 should toggle at the 22.05 kHz sample rate, the bit clock on pin 26 should run at about 22050 × 16 × 2 ≈ 705.6 kHz, and pin 22 carries the (non-periodic) serial data.
Now to your problem. The ESP32 has two 8-bit internal DACs, on GPIO25 and GPIO26, which output an analog signal with 8-bit depth. So when the built-in DAC is used, the analog signal comes out of those DAC pins rather than the I2S data pin. Let's take a look at your code:
i2s_set_pin(I2S_NUM_0, &pin_config);
I2S is a 3-line bus specification for audio communication. Since you're using the internal DAC, you don't need these three lines, and setting their pins makes the driver assume you want to use them (meaning the DAC pins won't be activated).
//i2s_set_pin(I2S_NUM_0, NULL);
Uncomment this and the driver will assume you want to use the internal DAC.
.bits_per_sample = I2S_BITS_PER_SAMPLE_16BIT,
Again, since the internal DAC only takes 8 bits per sample, the driver uses only the 8 most significant bits of each sample. You can set this to 8 bits and avoid any problems.
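As a side note (an untested sketch of mine, not part of the original answer): if you keep 16-bit samples, the useful value has to sit in the high byte, and the DAC expects an unsigned value, so a DAC-friendly sine table could be built like this:

#include <math.h>
#include <stdint.h>

#define SAMPLE_SIZE 4000
#define PI 3.14159265

/* Sketch: 16-bit samples whose high byte carries an unsigned 8-bit sine.
 * The low byte is ignored by the 8-bit built-in DAC. */
static uint16_t dac_samples[SAMPLE_SIZE];

static void build_dac_sine(void)
{
    for (int i = 0; i < SAMPLE_SIZE; i++) {
        double s = sin(i * PI / 180.0);            /* -1.0 .. 1.0 */
        uint8_t v = (uint8_t)(127.5 * s + 127.5);  /* 0 .. 255, unsigned */
        dac_samples[i] = (uint16_t)v << 8;         /* value in the high byte */
    }
}

Back to the posted setup function: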
void setup() {
    pinMode(26, OUTPUT);
    Serial.begin(115200);
    i2s_driver_install(I2S_NUM_0, &i2s_config, 0, NULL);
    //i2s_set_pin(I2S_NUM_0, NULL);
    i2s_set_pin(I2S_NUM_0, &pin_config);
    i2s_set_dac_mode(I2S_DAC_CHANNEL_RIGHT_EN);
    i2s_set_sample_rates(I2S_NUM_0, 22050); // set sample rate
    setup_sine_wave();
    i2s_set_clk(I2S_NUM_0, SAMPLE_RATE, I2S_BITS_PER_SAMPLE_16BIT, I2S_CHANNEL_MONO);
    i2s_write(I2S_NUM_0, &sample_data, SAMPLE_SIZE, &i2s_bytes_write, 500);
    i2s_driver_uninstall(I2S_NUM_0); // stop & destroy i2s driver
}
In your setup function you're installing the I2S driver, setting the pins for I2S communication with an external DAC, setting the sample rate to 22050, setting it to 22050 again via i2s_set_clk, writing one cycle of your sine wave, and then uninstalling the driver. Once the driver is uninstalled, nothing more can be output. Here's a more appropriate approach:
void setup() {
    Serial.begin(115200);
    i2s_driver_install(I2S_NUM_0, &i2s_config, 0, NULL);
    i2s_set_pin(I2S_NUM_0, NULL);
    i2s_set_dac_mode(I2S_DAC_CHANNEL_BOTH_EN); // You also might be sending data to the wrong channel, so use both.
    setup_sine_wave();
}
Now the loop function:
void loop()
{
    i2s_driver_install(I2S_NUM_0, &i2s_config, 0, NULL);
    i2s_write(I2S_NUM_0, &sample_data, SAMPLE_SIZE, &i2s_bytes_write, 500);
    delay(100);
    i2s_driver_uninstall(I2S_NUM_0);
    delay(10);
}
You don't need to install and uninstall the I2s driver, nor delay the outer function, since the write function writes to a buffer that is consumed in the specified sample rate.
void loop()
{
    i2s_write(I2S_NUM_0, &sample_data, SAMPLE_SIZE, &i2s_bytes_write, 500);
}
This is all you need, theoretically. But there's a major problem. You defined your audio buffer as an int array (a list of 32-bit signed values), and then you defined the length of this buffer as 4000:
#define SAMPLE_SIZE 4000
In your write function, the buffer size parameter expects the size of your buffer in bytes, much like the malloc function. Since each sample in your buffer is 4 bytes, you're only handing 1/4 of your buffer to the write function. In the end, you're not outputting a sine wave, but a quarter of one.
void loop()
{
    i2s_write(I2S_NUM_0, &sample_data, SAMPLE_SIZE * sizeof(int), &i2s_bytes_write, 500);
}
Should do the trick.
I haven't tested this code, but hopefully my explanation gives you some direction for debugging yours.
The I2S driver documentation has some code samples that you can check as well.
As the title already says, I want to continuously record raw audio through my microphone.
So the idea was to run a simple C program in the background as a service that would create chunks of audio and send those files through Sphinx speech recognition.
After that I can do some processing with the recognized words.
The problem is the (continuous) recognition. I can't just record 10-second audio chunks of what I've said, because chunk[33] and chunk[34] might belong together, and then Sphinx would output something like:
recognized chunk[33] -> ["enable light"]
recognized chunk[34] -> ["5 with 50 percent"]
Another approach would be to record audio continuously, but then I can't process big audio files with Sphinx.
I'm using the basic example from pocketsphinx:
#include <pocketsphinx.h>

int main(int argc, char *argv[])
{
    ps_decoder_t *ps;
    cmd_ln_t *config;
    FILE *fh;
    char const *hyp, *uttid;
    int16 buf[512];
    int rv;
    int32 score;

    config = cmd_ln_init(NULL, ps_args(), TRUE,
                         "-hmm", MODELDIR "/en-us/en-us",
                         "-lm", MODELDIR "/en-us/en-us.lm.bin",
                         "-dict", MODELDIR "/en-us/cmudict-en-us.dict",
                         NULL);
    if (config == NULL) {
        fprintf(stderr, "Failed to create config object, see log for details\n");
        return -1;
    }

    ps = ps_init(config);
    if (ps == NULL) {
        fprintf(stderr, "Failed to create recognizer, see log for details\n");
        return -1;
    }

    fh = fopen("audiochunk_33.raw", "rb");
    if (fh == NULL) {
        fprintf(stderr, "Unable to open input file audiochunk_33.raw\n");
        return -1;
    }

    rv = ps_start_utt(ps);
    while (!feof(fh)) {
        size_t nsamp;
        nsamp = fread(buf, 2, 512, fh);
        rv = ps_process_raw(ps, buf, nsamp, FALSE, FALSE);
    }
    rv = ps_end_utt(ps);

    hyp = ps_get_hyp(ps, &score);
    printf("Recognized: %s\n", hyp);

    fclose(fh);
    ps_free(ps);
    cmd_ln_free_r(config);
    return 0;
}
And here is a basic example using ffmpeg to create a simple audio file/chunk:
#include <stdio.h>
#include <stdint.h>
#include <math.h>

#define N 44100

int main(void)
{
    // Create audio buffer
    int16_t buf[N] = {0};    // buffer
    int n;                   // buffer index
    double Fs = 44100.0;     // sampling frequency

    // Generate 1 second of audio data - it's just a 1 kHz sine wave
    for (n = 0; n < N; ++n)
        buf[n] = 16383.0 * sin(n * 1000.0 * 2.0 * M_PI / Fs);

    // Pipe the audio data to ffmpeg, which writes it to a wav file
    FILE *pipeout;
    pipeout = popen("ffmpeg -y -f s16le -ar 44100 -ac 1 -i - beep.wav", "w");
    fwrite(buf, 2, N, pipeout);
    pclose(pipeout);
    return 0;
}
BR
Michael
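Since no answer is recorded here, a rough, untested sketch of one common approach (my addition): instead of fixed 10-second chunks, feed the raw stream to pocketsphinx continuously and only end an utterance when the decoder reports silence via ps_get_in_speech(). It assumes the decoder was initialized exactly as above, and that 16 kHz, 16-bit mono raw audio arrives on stdin, e.g. arecord -f S16_LE -r 16000 -c 1 -t raw | ./continuous.

#include <pocketsphinx.h>
#include <stdio.h>

/* Untested sketch: continuous decoding driven by the decoder's own
 * voice-activity detection instead of fixed-size chunks. */
static void decode_stream(ps_decoder_t *ps)
{
    int16 buf[2048];
    size_t nsamp;
    int utt_started = FALSE;
    int32 score;
    char const *hyp;

    ps_start_utt(ps);
    while ((nsamp = fread(buf, 2, 2048, stdin)) > 0) {
        ps_process_raw(ps, buf, nsamp, FALSE, FALSE);

        if (ps_get_in_speech(ps)) {
            utt_started = TRUE;
        } else if (utt_started) {
            /* Silence after speech: close the utterance and print it. */
            ps_end_utt(ps);
            hyp = ps_get_hyp(ps, &score);
            if (hyp)
                printf("Recognized: %s\n", hyp);
            ps_start_utt(ps);
            utt_started = FALSE;
        }
    }
}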
I'm trying to write a full-duplex test that copies audio in to audio out. sio_onmove does not get called, and I have no idea why. Here's my code so far:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sndio.h>

unsigned char buf[0xffff];
struct sio_hdl *hdl;

void cb(void *arg, int delta) {
    int l;
    printf("call %d\n", delta);
    for (;;) {
        l = sio_read(hdl, buf, delta);
        if (l == 0) break;
        sio_write(hdl, buf, l);
    }
}

int main(void) {
    int m, i;
    struct sio_par par;
    struct sio_cap cap;

    hdl = sio_open("rsnd/0", SIO_PLAY | SIO_REC, 1);
    sio_getcap(hdl, &cap);
    sio_initpar(&par);
    par.bits = cap.enc[0].bits;
    par.bps = cap.enc[0].bps;
    par.sig = cap.enc[0].sig;
    par.le = cap.enc[0].le;
    par.msb = cap.enc[0].msb;
    par.rchan = cap.rchan[0];
    par.pchan = cap.pchan[0];
    par.rate = cap.rate[0];
    par.appbufsz = 1024;
    sio_setpar(hdl, &par);
    sio_onmove(hdl, cb, NULL);
    sio_start(hdl);
    for (;;)
        sleep(1);
}
I'm opening rsnd/0 for recording and playback, initializing the parameters from a getcap call, setting cb as the onmove callback, and then starting audio. From there I loop forever doing nothing.
The sio_onmove() callback is called either from sio_revents() if non-blocking I/O is used, or from blocking sio_read() or sio_write().
Since the above program calls sleep(1) instead, the callback is never invoked.
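For reference, here is a rough, untested sketch (my addition) of what driving that non-blocking handle with poll() could look like; this is the path where sio_onmove() actually fires, from inside sio_revents():

#include <poll.h>
#include <sndio.h>
#include <stdlib.h>

/* Untested sketch: non-blocking event loop in which the onmove callback is
 * actually invoked. Assumes hdl was opened non-blocking, configured and
 * started as in the question. */
static void poll_loop(struct sio_hdl *hdl)
{
    struct pollfd *pfds = calloc(sio_nfds(hdl), sizeof(struct pollfd));

    for (;;) {
        int nfds = sio_pollfd(hdl, pfds, POLLIN | POLLOUT);
        if (poll(pfds, nfds, -1) < 0)
            break;
        int revents = sio_revents(hdl, pfds);  /* may call the onmove callback */
        if (revents & POLLIN) {
            /* sio_read() some data here ... */
        }
        if (revents & POLLOUT) {
            /* ... and sio_write() it back out. */
        }
    }
    free(pfds);
}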
AFAIU, to do the full-duplex test, you could use blocking I/O (set the last argument of sio_open() to 0) and follow these steps:
call sio_initpar() to initialize a sio_par structure, as you do
set your preferred parameters in the sio_par structure
call sio_setpar() to submit them to the device. Devices exposed through the server (e.g. "snd/0") will accept any parameters, while raw devices (e.g. "rsnd/0") pick something close to whatever the hardware supports.
call sio_getpar() to get the parameters the device accepted; this is needed to know the device buffer size
possibly check if they are usable by your program
call sio_start()
prime the play buffer by writing par.bufsz frames with sio_write(); this corresponds to par.bufsz * par.pchan * par.bps bytes (a sketch of this step follows)
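A minimal sketch of that priming step (my own, untested; it writes zeros, i.e. silence, and uses errx() from <err.h> as in the loop below):

/* Untested sketch: prime the playback buffer with par.bufsz frames of
 * silence before entering the read/write loop. */
size_t primebytes = par.bufsz * par.pchan * par.bps;
unsigned char *silence = calloc(1, primebytes);
if (sio_write(hdl, silence, primebytes) != primebytes)
    errx(1, "priming write failed");
free(silence);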
At this stage the device starts, and you could run the main loop as in the following pseudo-code:
unsigned char *data;
size_t n, todo, blksz;

blksz = par.round * par.rchan * par.bps;

for (;;) {
    /* read one block */
    data = buf;
    todo = blksz;
    while (todo > 0) {
        n = sio_read(hdl, data, todo);
        if (n == 0)
            errx(1, "failed");
        todo -= n;
        data += n;
    }

    /* write one block */
    n = sio_write(hdl, buf, blksz);
    if (n != blksz)
        errx(1, "failed");
}
The sio_onmove() callback is not needed for pure audio programs. It's only useful for synchronizing non-audio events (e.g. video or MIDI messages) to the audio stream.
I'm attempting to stream audio in my C++ application, which uses SDL in the mingw32 environment. From my understanding it's a fairly simple affair:
extern "C" void audioStep(void* unused, Uint8* stream, int len);
void initAudio()
{
SDL_AudioSpec* fmt;
fmt = (SDL_AudioSpec*)malloc(sizeof(SDL_AudioSpec));
fmt->freq = 22050;
fmt->format = AUDIO_S16;
fmt->channels = 1;
fmt->samples = 8192;
fmt->callback = audioStep;
fmt->userdata = NULL;
SDL_AudioSpec obFmt;
if (SDL_OpenAudio(fmt, &obFmt) < 0)
{
fprintf(stderr, "Unable to open audio: %s\n", SDL_GetError());
getchar();
exit(1);
}
SDL_PauseAudio(0);
}
.
.
.
extern "C" void audioStep(void* unused, Uint8* stream, int len)
{
// Do stuff.
}
The issue I'm experiencing is that audioStep never seems to be called. Before initAudio is run, SDL_Init is called with SDL_INIT_EVERYTHING. Then graphics are fully initialized (SDL_SetVideoMode and such), and then the audio system is initialized with the code above.
Is it possible somehow I compiled SDL without audio support? (Is there a way to check if audio is enabled or if it's using some sort of null audio device?)
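One way to check the last point (an untested sketch of mine using SDL 1.2's SDL_AudioDriverName(), which reports the audio backend SDL actually selected):

#include <SDL.h>
#include <stdio.h>

/* Untested sketch: print the audio driver SDL 1.2 selected. If this prints
 * a dummy/disk driver name, or reports the subsystem as uninitialized,
 * SDL has no usable audio backend. */
void reportAudioDriver(void)
{
    char name[64];
    if (SDL_AudioDriverName(name, sizeof(name)) != NULL)
        printf("SDL audio driver: %s\n", name);
    else
        printf("Audio subsystem is not initialized\n");
}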
I have working code in SDL.NET, so maybe this will help. I found that I had to have a Video window opened:
SdlDotNet.Graphics.Video.SetVideoMode(1, 1);
SdlDotNet.Audio.Mixer.OpenAudio(stream);
// BUG: close (or hide) it
SdlDotNet.Graphics.Video.Close();
Otherwise, it would not initialize correctly.
Why should audioStep be called if you never play a sound? There's nothing to do.
See also: Using SDL Sound.