I have the code below to play a particular sound loaded into a buffer.
sound1.gainNode = context.createGain();
sound1.source = context.createBufferSource();
sound1.source.buffer = soundBuffers[num];
sound1.source.connect(sound1.gainNode);
sound1.gainNode.connect(context.destination);
sound1.source.looping = true;
sound1.source.start(0);
Here's how I call the change volume method:
<input type="range" min="0" max="100" value="70" id="playBtn1_vol" onchange="changeVolume(this,sound1Gain)" style="display:none">
Here's the change volume method:
var changeVolume = function(element, soundNo) {
    var volume = element.value;
    var fraction = parseInt(element.value) / parseInt(element.max);
    // Using an x^2 progression as it gives a better sound than linear.
    soundNo.gainNode.gain.value = fraction * fraction;
};
Before I tried to get the volume working using gain, it was playing just fine, but now it's broken and I can't find what's wrong with it. If anyone could point me in the right direction, it would be very much appreciated.
Here's sample code (JS) that should help you out. Reference
var VolumeSample = {};

// Gain node needs to be mutated by volume control.
VolumeSample.gainNode = null;

VolumeSample.play = function() {
    if (!context.createGain)
        context.createGain = context.createGainNode;
    this.gainNode = context.createGain();
    var source = context.createBufferSource();
    source.buffer = BUFFERS.techno;
    // Connect source to a gain node
    source.connect(this.gainNode);
    // Connect gain node to destination
    this.gainNode.connect(context.destination);
    // Start playback in a loop
    source.loop = true;
    if (!source.start)
        source.start = source.noteOn;
    source.start(0);
    this.source = source;
};

VolumeSample.changeVolume = function(element) {
    var volume = element.value;
    var fraction = parseInt(element.value) / parseInt(element.max);
    // Let's use an x*x curve (x-squared) since simple linear (x) does not
    // sound as good.
    this.gainNode.gain.value = fraction * fraction;
};

VolumeSample.stop = function() {
    if (!this.source.stop)
        this.source.stop = this.source.noteOff;
    this.source.stop(0);
};

VolumeSample.toggle = function() {
    this.playing ? this.stop() : this.play();
    this.playing = !this.playing;
};
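To wire this up to the slider from the original question, a minimal hookup sketch (assuming the same playBtn1_vol element) might look like:

// Drive the gain from the question's 0-100 slider as the user drags it.
var slider = document.getElementById('playBtn1_vol');
slider.addEventListener('input', function() {
    VolumeSample.changeVolume(this);
});

// Start/stop playback, e.g. from a button's click handler.
VolumeSample.toggle();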
This renders Web Audio with a volume widget. Just replace the mp3 placeholder with the path to an actual file, or a URL pointing to one.
<html>
<head>
    <title>render audio with volume control</title>
</head>
<body onkeypress="render_audio()">
    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
    <button onclick="render_audio()">play_again</button>
    <script>
        var audio_context = null;
        var gain_node = null;
        var stored_buffer = null;

        window.AudioContext = window.AudioContext || window.webkitAudioContext;
        audio_context = new AudioContext();

        gain_node = audio_context.createGain(); // Declare gain node
        gain_node.connect(audio_context.destination); // Connect gain node to speakers

        function render_audio() {
            var request = new XMLHttpRequest();
            var audio_url = "put_your_music_file_here.mp3";
            request.open('GET', audio_url, true); // loading local file for now
            request.responseType = 'arraybuffer';

            // Decode asynchronously
            request.onload = function() {
                audio_context.decodeAudioData(request.response, function(buffer) {
                    stored_buffer = buffer; // store buffer for replay later
                    var source = audio_context.createBufferSource(); // creates a sound source
                    source.buffer = buffer; // tell the source which sound to play
                    source.connect(gain_node); // connect source to the gain node
                    source.start(0); // play the source now
                });
            };
            request.send();
        }

        // --- enable volume control for output speakers
        document.getElementById('volume').addEventListener('change', function() {
            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;
            console.log("curr_volume ", curr_volume);
        });

        // init
        render_audio();
    </script>
</body>
</html>
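Since stored_buffer is saved specifically "for replay later", a small sketch of a replay helper (reusing the variables above) avoids re-downloading the file on every press:

// Replay the decoded buffer saved above instead of re-fetching
// and re-decoding the file each time.
function replay() {
    if (!stored_buffer) { render_audio(); return; }
    var source = audio_context.createBufferSource();
    source.buffer = stored_buffer;
    source.connect(gain_node);
    source.start(0);
}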
I resolved this issue; I think it was to do with the ordering of the lines or the labels. I had this:
sound1.gainNode = context.createGain();
sound1.source = context.createBufferSource();
sound1.source.buffer = soundBuffers[num];
sound1.source.connect(sound1.gainNode);
sound1.gainNode.connect(context.destination);
sound1.source.looping = true;
sound1.source.start(0);
I updated it to this:
source1 = context.createBufferSource();
gain1 = context.createGain();
gain1.gain.value = parseFloat(document.getElementById('playBtn' + num + '_vol').value); // set the volume for our currently playing sound
source1.buffer = soundBuffers[num];
source1.connect(gain1);
gain1.connect(context.destination);
source1.looping = true;
source1[source1.start ? 'start' : 'noteOn'](0);
And now it works. Might have been a rookie mistake, but hopefully this'll help someone else starting out with Web Audio.
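One thing worth double-checking in the working version: gain1.gain.value is set straight from the slider's value, and if that slider still uses min="0" max="100" as in the original markup, the raw value could push the gain as high as 100. A sketch normalizing it with the same x² progression as changeVolume (the sliderToGain helper is hypothetical):

// Hypothetical helper: normalize a 0-100 slider to a 0-1 gain,
// reusing the x^2 progression from changeVolume above.
function sliderToGain(el) {
    var fraction = parseFloat(el.value) / parseFloat(el.max);
    return fraction * fraction;
}

gain1.gain.value = sliderToGain(document.getElementById('playBtn' + num + '_vol'));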
Related
I'm trying to broadcast captured mic audio across a websocket. I can see the buffer array being sent, and the array has valid data, but the receiving client side cannot hear it. I'm pretty sure my playback function is correct, because I can generate white noise by filling an array with random numbers and using the playback function to hear it. I'm thinking maybe the broadcast audio is too quiet to hear, because the numbers in the array seem mostly to be in the .000### range. Any ideas? Capturing mic audio and broadcasting it seems over-complicated... :/
//broadcasting side
navigator.mediaDevices.getUserMedia({audio: true, video: false}) // request cam
    .then(stream => {
        vid.srcObject = stream;
        context = new AudioContext();
        var source = context.createMediaStreamSource(stream);
        var processor = context.createScriptProcessor(1024, 2, 2);
        source.connect(processor);
        processor.connect(context.destination);
        processor.onaudioprocess = function(e) {
            audiodata = e.inputBuffer.getChannelData(1);
            socket.send(JSON.stringify({sound: audiodata, to: to, from: '$username', text: ''}));
        };
        return vid.play(); // returns a Promise
    });
//receiving side: object to array
if (typeof (message.sound) != "undefined") {
    //$('#video_stream_btn').trigger('click');
    var json_sound = message.sound;
    var array_sound = [];
    for (var i in json_sound) {
        array_sound.push([i, json_sound[i]]);
    }
    if (typeof(context) == 'undefined') {
        context = new AudioContext();
    }
    play_sound(array_sound, context);
    return;
}
// receiving side play sound function
function play_sound(raw, context) {
    //alert(raw.length);
    var audioBuffer = context.createBuffer(1, raw.length, context.sampleRate);
    audioBuffer.getChannelData(0).set(raw);
    var source = context.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(context.destination);
    source.start(0);
}
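For reference, the white-noise sanity check described above might look something like this (a sketch using the play_sound function as-is):

// 1 second of white noise: random samples in [-1, 1).
var noise = new Float32Array(context.sampleRate);
for (var i = 0; i < noise.length; i++) {
    noise[i] = Math.random() * 2 - 1;
}
play_sound(noise, context);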
For anyone out there trying to figure this out: I ended up encoding the audio to an Int16Array, sent it across the socket, and had the client decode it back into a Float32Array and pass it to the play_sound function. I basically just stole a bunch of stuff off Stack Overflow and faked it until I made it, 'cause I'm not that smart :)
Capturing the mic, converting to an Int16Array, then sending it across the socket:
navigator.mediaDevices.getUserMedia({audio: {sampleSize: 16, channelCount: 2}, video: true}) // request cam
    .then(stream => {
        vid.srcObject = stream; // don't use createObjectURL(MediaStream)
        context = new AudioContext();
        var source = context.createMediaStreamSource(stream);
        var processor = context.createScriptProcessor(1024, 2, 2);
        source.connect(processor);
        processor.connect(context.destination);
        processor.onaudioprocess = function(e) {
            // Do something with the data, i.e. convert this to WAV
            audiodata = new Int16Array(convertFloat32ToInt16(e.inputBuffer.getChannelData(0)));
            console.log(audiodata);
            socket.send(JSON.stringify({sound: audiodata, to: to, from: '$username', text: ''}));
        };
        return vid.play(); // returns a Promise
    });
Relevant function for converting the captured mic audio to an Int16Array:
function convertFloat32ToInt16(buffer) {
    var l = buffer.length;
    var buf = new Int16Array(l);
    while (l--) {
        // Clamp to [-1, 1] before scaling, so out-of-range samples
        // don't wrap around the 16-bit range.
        var s = Math.max(-1, Math.min(1, buffer[l]));
        buf[l] = s * 0x7FFF;
    }
    return buf.buffer;
}
Receiving client side: JSON object back to an array, then on to a Float32Array:
if (typeof (message.sound) != "undefined") {
    //$('#video_stream_btn').trigger('click');
    //var json_sound = message.sound;
    if (typeof(context) == 'undefined') {
        context = new AudioContext();
    }
    var sound_array = [];
    for (var i in message.sound) {
        sound_array[i] = message.sound[i];
    }
    //sound_array16 = new Int16Array(sound_array);
    var sound_array32 = int16ToFloat32(sound_array);
    play_sound(sound_array32, context);
    return;
}
Relevant receiving side Int16Array to Float32Array function:
function int16ToFloat32(inputArray) {
    let int16arr = new Int16Array(inputArray);
    var output = new Float32Array(int16arr.length);
    for (var i = 0; i < int16arr.length; i++) {
        // Int16Array values are already signed, so just scale:
        // negative samples by 0x8000, positive ones by 0x7FFF.
        var int = int16arr[i];
        output[i] = (int < 0) ? int / 0x8000 : int / 0x7FFF;
    }
    return output;
}
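A side note on the transport: JSON.stringify serializes a typed array as a plain object of index/value pairs, which is why the receiver has to loop it back into an array. WebSockets can also carry binary frames, so a sketch of that approach (assuming the same socket, and ignoring the to/from metadata, which would need its own framing) would be:

// Sender: ship the raw Int16Array bytes as a binary frame.
socket.send(audiodata.buffer);

// Receiver: request ArrayBuffer frames and view them as Int16Array.
socket.binaryType = 'arraybuffer';
socket.onmessage = function(event) {
    var int16 = new Int16Array(event.data);
    play_sound(int16ToFloat32(int16), context);
};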
I've been banging my head against a wall on this for two days now, and I really hope someone can help with this.
I've taken some code for a getUserMedia microphone recorder from https://higuma.github.io/wav-audio-encoder-js/ and https://github.com/higuma/wav-audio-encoder-js. I've stripped out the components I don't need, and somehow, in the process, I've made it so that no audio comes through on the generated file.
The file looks correctly formatted, but it is completely silent, and I'm getting zero errors to work from.
// navigator.getUserMedia shim
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

// URL shim
window.URL = window.URL || window.webkitURL;

// audio context + .createScriptProcessor shim
var audioContext = new AudioContext;
if (audioContext.createScriptProcessor == null) {
    audioContext.createScriptProcessor = audioContext.createJavaScriptNode;
}

// selectors
var $microphone = $('#microphone');
var $cancel = $('#cancel');
var $recordingList = $('#recording-list');
var $timeDisplay = $('#time-display');
var $microphoneLevel = $('#microphone-level');

var microphone = undefined;
var input = audioContext.createGain();
var mixer = audioContext.createGain();
var microphoneLevel = audioContext.createGain();
microphoneLevel.gain.value = 0;
microphoneLevel.connect(mixer);

var processor = undefined;
var startTime = null;
var encoder = undefined;

// obtaining microphone input
$microphone.click(function() {
    navigator.getUserMedia({ audio: true },
        function(stream) {
            microphone = audioContext.createMediaStreamSource(stream);
            microphone.connect(microphoneLevel);
            console.log(microphone);
        },
        function(error) {
            window.alert("Could not get audio input");
        });
});

// start/stop recording
$microphone.click(function() {
    if (startTime != null) {
        stopRecording(true);
    } else {
        startRecording();
    }
});

// cancel recording (without saving)
$cancel.click(function() {
    stopRecording(false);
});

// microphone level slider
$microphoneLevel.on('input', function() {
    var level = $microphoneLevel[0].valueAsNumber / 100;
    microphoneLevel.gain.value = level * level;
});

function startRecording() {
    startTime = Date.now();
    $microphone.html('Stop');
    $cancel.removeClass("hidden");
    startRecordingProcess();
}

function startRecordingProcess() {
    processor = audioContext.createScriptProcessor(1024, 2, 2);
    input.connect(processor);
    processor.connect(audioContext.destination);
    // wav encoder
    encoder = new WavAudioEncoder(audioContext.sampleRate, 2);
    processor.onaudioprocess = function(event) {
        encoder.encode(getBuffers(event));
    };
}

function getBuffers(event) {
    var buffers = [];
    for (var ch = 0; ch < 2; ++ch) {
        buffers[ch] = event.inputBuffer.getChannelData(ch);
    }
    return buffers;
}

function stopRecording(finish) {
    startTime = null;
    $timeDisplay.html('00:00');
    $microphone.html('<i class="start fa fa-microphone fa-5x" aria-hidden="true"></i>');
    $cancel.addClass('hidden');
    stopRecordingProcess(finish);
}

function stopRecordingProcess(finish) {
    input.disconnect();
    processor.disconnect();
    if (finish) { // if microphone pressed
        saveRecording(encoder.finish());
    } else { // if cancel pressed
        encoder.cancel();
    }
}

function saveRecording(blob) {
    var url = URL.createObjectURL(blob);
    var html = "<p class='recording' recording='" + url + "'><a class='btn btn-default' href='" + url + "' download='recording.wav'>Save Recording</a></p>";
    $recordingList.prepend($(html));
    // once we have done all the processing, upload the file to beyond verbal
    // uploadFile(blob);
}

// update the recording timer
function minuteSeconds(n) { return (n < 10 ? "0" : "") + n; }

function updateDateTime() {
    if (startTime !== null) {
        var sec = Math.floor((Date.now() - startTime) / 1000);
        $timeDisplay.html(minuteSeconds(sec / 60 | 0) + ":" + minuteSeconds(sec % 60));
    }
}

window.setInterval(updateDateTime, 200);
If anyone has run into this before, I'd really appreciate a fix.
Thank you all for your time, and have a nice day/night.
First, check your microphone with a general recording demo.
If it's working, try passing only the audio stream and the required MIME type to MediaRecorder for basic audio recording.
If you want to stick with this Web Audio context,
I suspect the issue is microphoneLevel.gain.value = 0;
Change it to microphoneLevel.gain.value = 1; // or 2
gain = 0 means we are muting the audio;
gain = 1 is the default audio level;
gain = 0.1 to 0.9 reduces the volume level;
gain above 1 increases the volume level.
Print the level values to the console in the slider handler:
// microphone level slider
$microphoneLevel.on('input', function() {
    var level = $microphoneLevel[0].valueAsNumber / 100;
    console.log('value: ' + $microphoneLevel[0].valueAsNumber + ' Level: ' + level);
    microphoneLevel.gain.value = level * level; // if level is zero, then it's silent
    // it's better to use predefined level values based on slider position instead of multiplying
});
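Following that last comment, a sketch of the predefined-levels idea (the preset values here are hypothetical):

// Map slider positions to preset gains instead of squaring a fraction.
var GAIN_PRESETS = [0.25, 0.5, 1.0, 1.5, 2.0]; // hypothetical presets

$microphoneLevel.on('input', function() {
    // Assume the slider ranges 0-100; bucket it into one of five presets.
    var idx = Math.min(Math.floor($microphoneLevel[0].valueAsNumber / 20), GAIN_PRESETS.length - 1);
    microphoneLevel.gain.value = GAIN_PRESETS[idx];
});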
See my demo and source
Something strange is happening here.
I create an audio buffer, store it in a variable, and try to reuse it several times, but it seems to be corrupted.
I make some buttons:
<button onclick="play();">play(0)</button>
<button onclick="playsection();">play section</button>
<button onclick="stop();">stop()</button>
Get some audio data:
context = new AudioContext();
var getWav = new XMLHttpRequest();
var wavbuf;
getWav.open("GET", "/wav/test.wav", true);
getWav.responseType = "arraybuffer";
getWav.onload = function() {
    context.decodeAudioData(getWav.response, function(buffer) {
        wavbuf = buffer;
    });
};
getWav.send();
var p;
I can evaluate play() multiple times without an error:
function play() {
    p = context.createBufferSource();
    p.buffer = wavbuf;
    p.connect(context.destination);
    p.start(0);
}
playsection() seems to work only once, or occasionally more than once if I press stop before stop(10) takes effect:
function playsection() {
    p = context.createBufferSource();
    p.buffer = wavbuf;
    p.connect(context.destination);
    p.start(0, 6); // start after 6 seconds
    p.stop(10); // stop after 10 seconds
}
function stop() {
    p.stop();
}
It seems like p.buffer = wavbuf creates a pointer into the buffer rather than a copy.
Is this a bug or a feature?
So this is interesting: it will play the section consistently either without the stop:
function playsection() {
    p = context.createBufferSource();
    p.buffer = wavbuf;
    p.connect(context.destination);
    p.start(0, 6); // start after 6 seconds
}
or without the offset:
function playsection() {
    p = context.createBufferSource();
    p.buffer = wavbuf;
    p.connect(context.destination);
    p.start(0);
    p.stop(10); // stop after 10 seconds
}
and even declaring the offset and duration within the start:
function playsection() {
    p = context.createBufferSource();
    p.buffer = wavbuf;
    p.connect(context.destination);
    p.start(0, 6, 10);
}
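One detail that may explain the one-shot behaviour (not confirmed in this thread, just the spec semantics): start(when, offset, duration) and stop(when) take when as absolute times on the AudioContext clock, not as durations. So p.stop(10) fires at context time 10 seconds; once the page has been open longer than that, any new source is stopped immediately. A sketch scheduling the stop relative to the current time:

function playsection() {
    p = context.createBufferSource();
    p.buffer = wavbuf;
    p.connect(context.destination);
    p.start(0, 6);                   // start now, 6 seconds into the buffer
    p.stop(context.currentTime + 4); // stop 4 seconds from now, i.e. after the 6s-10s span
}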
I have some hello world happening with Leap Motion using Node.js.
I want to know the pitch value for a hand, but it is returning "undefined".
var Leap = require('leapjs');
var webSocket = require('ws'),
    ws = new webSocket('ws://127.0.0.1:6437');

ws.on('message', function(data, flags) {
    frame = JSON.parse(data);
    var controller = Leap.loop(function(frame) {
        if (frame.hands.length == 1) {
            var hand = frame.hands[0];
            var position = hand.palmPosition;
            var velocity = hand.palmVelocity;
            var direction = hand.direction;
            var finger = hand.fingers[0];
            var pitch = direction.pitch;
            var type = hand.type;
            if (type == "left") {
                console.log("Left hand.");
                console.log("pitch ::" + pitch);
            }
            if (type == "right") {
                console.log("Right hand.");
            }
        }
    });
    if (frame.hands && frame.hands.length > 1) {
        console.log("two");
    }
    if (frame.hands && frame.hands.length == 0) {
        console.log("apaga too");
    }
});
So, when I log my left hand, I get:
Left hand.
pitch ::undefined
I believe pitch is a method belonging to hand, not an attribute belonging to direction (see the docs). Have you tried hand.pitch()?
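A sketch of that suggestion dropped into the loop above (in LeapJS, pitch(), roll(), and yaw() are methods on the Hand object, returning angles in radians):

if (type == "left") {
    console.log("Left hand.");
    console.log("pitch ::" + hand.pitch()); // pitch angle in radians
}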
All I want to do is play a WAV at random pitches in my Universal App. If there is a more straightforward way to do this than needing SharpDX, tell me!
private void PlayTone(float randomPitch)
{
    var xAudio = new XAudio2();
    var masteringVoice = new MasteringVoice(xAudio);
    var nativeFileStream = new NativeFileStream("Assets/440tone2.wav", NativeFileMode.Open, NativeFileAccess.Read, NativeFileShare.Read);
    var stream = new SoundStream(nativeFileStream);
    var waveFormat = stream.Format;
    var buffer = new AudioBuffer
    {
        Stream = stream.ToDataStream(),
        AudioBytes = (int)stream.Length,
        Flags = BufferFlags.EndOfStream
    };
    var sourceVoice = new SourceVoice(xAudio, waveFormat, true);
    sourceVoice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);
    // sourceVoice.SetFrequencyRatio(200.0f);
    sourceVoice.Start();
}
I had to switch these around:
sourceVoice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);
sourceVoice.SetFrequencyRatio(200.0f);